Example #1
def main(argv):
    del argv  # unused arg
    if not FLAGS.use_gpu:
        raise ValueError('Only GPU is currently supported.')
    if FLAGS.num_cores > 1:
        raise ValueError('Only a single accelerator is currently supported.')
    tf.random.set_seed(FLAGS.seed)
    tf.io.gfile.makedirs(FLAGS.output_dir)
    logging.info('output_dir=%s', FLAGS.output_dir)

    ds_info = tfds.builder(FLAGS.dataset).info
    batch_size = FLAGS.per_core_batch_size * FLAGS.num_cores
    steps_per_eval = ds_info.splits['test'].num_examples // batch_size
    num_classes = ds_info.features['label'].num_classes

    data_dir = FLAGS.data_dir
    dataset_builder = ub.datasets.get(
        FLAGS.dataset,
        download_data=FLAGS.download_data,
        data_dir=data_dir,
        split=tfds.Split.TEST,
        drop_remainder=FLAGS.drop_remainder_for_eval)
    dataset = dataset_builder.load(batch_size=batch_size)
    test_datasets = {'clean': dataset}
    if FLAGS.eval_on_ood:
        ood_dataset_names = FLAGS.ood_dataset
        ood_datasets, steps_per_ood = ood_utils.load_ood_datasets(
            ood_dataset_names,
            dataset_builder,
            1. - FLAGS.train_proportion,
            batch_size,
            drop_remainder=FLAGS.drop_remainder_for_eval)
        test_datasets.update(ood_datasets)
    if FLAGS.dataset == 'cifar100':
        data_dir = FLAGS.cifar100_c_path
    corruption_types, _ = utils.load_corrupted_test_info(FLAGS.dataset)
    for corruption_type in corruption_types:
        for severity in range(1, 6):
            dataset = ub.datasets.get(
                f'{FLAGS.dataset}_corrupted',
                corruption_type=corruption_type,
                download_data=FLAGS.download_data,
                data_dir=data_dir,
                severity=severity,
                split=tfds.Split.TEST,
                drop_remainder=FLAGS.drop_remainder_for_eval).load(
                    batch_size=batch_size)
            test_datasets[f'{corruption_type}_{severity}'] = dataset

    model = ub.models.wide_resnet(input_shape=ds_info.features['image'].shape,
                                  depth=28,
                                  width_multiplier=10,
                                  num_classes=num_classes,
                                  l2=0.,
                                  version=2)
    logging.info('Model input shape: %s', model.input_shape)
    logging.info('Model output shape: %s', model.output_shape)
    logging.info('Model number of weights: %s', model.count_params())

    # Search for checkpoints from their index file; then remove the index suffix.
    ensemble_filenames = parse_checkpoint_dir(FLAGS.checkpoint_dir)
    ensemble_size = len(ensemble_filenames)
    logging.info('Ensemble size: %s', ensemble_size)
    logging.info('Ensemble number of weights: %s',
                 ensemble_size * model.count_params())
    logging.info('Ensemble filenames: %s', str(ensemble_filenames))
    checkpoint = tf.train.Checkpoint(model=model)

    # Write model predictions to files.
    num_datasets = len(test_datasets)
    for m, ensemble_filename in enumerate(ensemble_filenames):
        checkpoint.restore(ensemble_filename)
        for n, (name, test_dataset) in enumerate(test_datasets.items()):
            filename = '{dataset}_{member}.npy'.format(
                dataset=name.replace('/', '_'),
                member=m)  # ood dataset name has '/'
            filename = os.path.join(FLAGS.output_dir, filename)
            if not tf.io.gfile.exists(filename):
                logits = []
                test_iterator = iter(test_dataset)
                steps = (steps_per_eval if 'ood/' not in name
                         else steps_per_ood[name])
                for _ in range(steps):
                    features = next(test_iterator)['features']  # pytype: disable=unsupported-operands
                    logits.append(model(features, training=False))

                logits = tf.concat(logits, axis=0)
                with tf.io.gfile.GFile(filename, 'w') as f:
                    np.save(f, logits.numpy())
            percent = (m * num_datasets +
                       (n + 1)) / (ensemble_size * num_datasets)
            message = (
                '{:.1%} completion for prediction: ensemble member {:d}/{:d}. '
                'Dataset {:d}/{:d}'.format(percent, m + 1, ensemble_size,
                                           n + 1, num_datasets))
            logging.info(message)

    metrics = {
        'test/negative_log_likelihood': tf.keras.metrics.Mean(),
        'test/gibbs_cross_entropy': tf.keras.metrics.Mean(),
        'test/accuracy': tf.keras.metrics.SparseCategoricalAccuracy(),
        'test/ece':
        rm.metrics.ExpectedCalibrationError(num_bins=FLAGS.num_bins),
        'test/diversity': rm.metrics.AveragePairwiseDiversity(),
    }
    if FLAGS.eval_on_ood:
        ood_metrics = ood_utils.create_ood_metrics(ood_dataset_names)
        metrics.update(ood_metrics)
    corrupt_metrics = {}
    for name in test_datasets:
        corrupt_metrics['test/nll_{}'.format(name)] = tf.keras.metrics.Mean()
        corrupt_metrics['test/accuracy_{}'.format(name)] = (
            tf.keras.metrics.SparseCategoricalAccuracy())
        corrupt_metrics['test/ece_{}'.format(name)] = (
            rm.metrics.ExpectedCalibrationError(num_bins=FLAGS.num_bins))
    for i in range(ensemble_size):
        metrics['test/nll_member_{}'.format(i)] = tf.keras.metrics.Mean()
        metrics['test/accuracy_member_{}'.format(i)] = (
            tf.keras.metrics.SparseCategoricalAccuracy())

    # Evaluate model predictions.
    for n, (name, test_dataset) in enumerate(test_datasets.items()):
        logits_dataset = []
        for m in range(ensemble_size):
            filename = '{dataset}_{member}.npy'.format(
                dataset=name.replace('/', '_'),
                member=m)  # ood dataset name has '/'
            filename = os.path.join(FLAGS.output_dir, filename)
            with tf.io.gfile.GFile(filename, 'rb') as f:
                logits_dataset.append(np.load(f))

        logits_dataset = tf.convert_to_tensor(logits_dataset)
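        # Shape: [ensemble_size, num_test_examples, num_classes].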
        test_iterator = iter(test_dataset)
        steps = steps_per_eval if 'ood/' not in name else steps_per_ood[name]
        for step in range(steps):
            inputs = next(test_iterator)
            labels = inputs['labels']  # pytype: disable=unsupported-operands
            logits = logits_dataset[:, (step * batch_size):((step + 1) *
                                                            batch_size)]
            labels = tf.cast(labels, tf.int32)
            negative_log_likelihood_metric = rm.metrics.EnsembleCrossEntropy()
            negative_log_likelihood_metric.add_batch(logits, labels=labels)
            negative_log_likelihood = list(
                negative_log_likelihood_metric.result().values())[0]
            per_probs = tf.nn.softmax(logits)
            probs = tf.reduce_mean(per_probs, axis=0)
            logits_mean = tf.reduce_mean(logits, axis=0)

            if name == 'clean':
                gibbs_ce_metric = rm.metrics.GibbsCrossEntropy()
                gibbs_ce_metric.add_batch(logits, labels=labels)
                gibbs_ce = list(gibbs_ce_metric.result().values())[0]
                metrics['test/negative_log_likelihood'].update_state(
                    negative_log_likelihood)
                metrics['test/gibbs_cross_entropy'].update_state(gibbs_ce)
                metrics['test/accuracy'].update_state(labels, probs)
                metrics['test/ece'].add_batch(probs, label=labels)

                for i in range(ensemble_size):
                    member_probs = per_probs[i]
                    member_loss = tf.keras.losses.sparse_categorical_crossentropy(
                        labels, member_probs)
                    metrics['test/nll_member_{}'.format(i)].update_state(
                        member_loss)
                    metrics['test/accuracy_member_{}'.format(i)].update_state(
                        labels, member_probs)
                metrics['test/diversity'].add_batch(per_probs)

            elif name.startswith('ood/'):
                ood_labels = 1 - inputs['is_in_distribution']  # pytype: disable=unsupported-operands
                if FLAGS.dempster_shafer_ood:
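                    # Dempster-Shafer uncertainty, as in the SNGP paper:
                    # u = K / (K + sum_c exp(logit_c)) for K classes.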
                    ood_scores = ood_utils.DempsterShaferUncertainty(
                        logits_mean)
                else:
                    ood_scores = 1 - tf.reduce_max(probs, axis=-1)

                for metric_name, metric in metrics.items():
                    if name in metric_name:
                        metric.update_state(ood_labels, ood_scores)
            else:
                corrupt_metrics['test/nll_{}'.format(name)].update_state(
                    negative_log_likelihood)
                corrupt_metrics['test/accuracy_{}'.format(name)].update_state(
                    labels, probs)
                corrupt_metrics['test/ece_{}'.format(name)].add_batch(
                    probs, label=labels)

        message = (
            '{:.1%} completion for evaluation: dataset {:d}/{:d}'.format(
                (n + 1) / num_datasets, n + 1, num_datasets))
        logging.info(message)

    corrupt_results = utils.aggregate_corrupt_metrics(corrupt_metrics,
                                                      corruption_types)
    total_results = {name: metric.result() for name, metric in metrics.items()}
    total_results.update(corrupt_results)
    # Robustness Metrics results are themselves dicts, so flatten them.
    total_results = utils.flatten_dictionary(total_results)
    logging.info('Metrics: %s', total_results)
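
# Note: parse_checkpoint_dir is called above but not shown. A minimal sketch
# of what it presumably does, mirroring the inline glob logic in Example #5
# (the recursive '**' pattern and the helper body are assumptions; uses the
# same `tf` and `os` imports as the surrounding examples):
def parse_checkpoint_dir(checkpoint_dir):
    """Sketch: collect checkpoint prefixes under a directory.

    Each TF2 checkpoint writes an `.index` file; globbing for those and
    dropping the suffix yields the prefixes expected by
    `tf.train.Checkpoint.restore`.
    """
    index_files = tf.io.gfile.glob(
        os.path.join(checkpoint_dir, '**/*.index'))
    return [filename[:-len('.index')] for filename in index_files]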
Example #2
def main(argv):
    del argv  # unused arg
    tf.io.gfile.makedirs(FLAGS.output_dir)
    logging.info('Saving checkpoints at %s', FLAGS.output_dir)
    tf.random.set_seed(FLAGS.seed)

    if FLAGS.use_gpu:
        logging.info('Use GPU')
        strategy = tf.distribute.MirroredStrategy()
    else:
        logging.info('Use TPU at %s',
                     FLAGS.tpu if FLAGS.tpu is not None else 'local')
        resolver = tf.distribute.cluster_resolver.TPUClusterResolver(
            tpu=FLAGS.tpu)
        tf.config.experimental_connect_to_cluster(resolver)
        tf.tpu.experimental.initialize_tpu_system(resolver)
        strategy = tf.distribute.TPUStrategy(resolver)

    ds_info = tfds.builder(FLAGS.dataset).info
    batch_size = (FLAGS.per_core_batch_size * FLAGS.num_cores //
                  FLAGS.num_dropout_samples_training)
    test_batch_size = FLAGS.per_core_batch_size * FLAGS.num_cores
    num_classes = ds_info.features['label'].num_classes

    aug_params = {
        'augmix': FLAGS.augmix,
        'aug_count': FLAGS.aug_count,
        'augmix_depth': FLAGS.augmix_depth,
        'augmix_prob_coeff': FLAGS.augmix_prob_coeff,
        'augmix_width': FLAGS.augmix_width,
        'ensemble_size': 1,
        'mixup_alpha': FLAGS.mixup_alpha,
    }
    validation_proportion = 1. - FLAGS.train_proportion
    use_validation_set = validation_proportion > 0.
    train_dataset = data_utils.load_dataset(
        split=tfds.Split.TRAIN,
        name=FLAGS.dataset,
        batch_size=batch_size,
        use_bfloat16=FLAGS.use_bfloat16,
        aug_params=aug_params,
        validation_set=use_validation_set,
        validation_proportion=validation_proportion)
    train_sample_size = ds_info.splits[
        'train'].num_examples * FLAGS.train_proportion
    val_sample_size = ds_info.splits['train'].num_examples - train_sample_size
    if use_validation_set:
        validation_dataset = data_utils.load_dataset(
            split=tfds.Split.VALIDATION,
            name=FLAGS.dataset,
            batch_size=batch_size,
            use_bfloat16=FLAGS.use_bfloat16,
            aug_params=aug_params,
            validation_set=use_validation_set,
            validation_proportion=validation_proportion)
        validation_dataset = strategy.experimental_distribute_dataset(
            validation_dataset)
        steps_per_val = int(val_sample_size / batch_size)
    clean_test_dataset = utils.load_dataset(split=tfds.Split.TEST,
                                            name=FLAGS.dataset,
                                            batch_size=test_batch_size,
                                            use_bfloat16=FLAGS.use_bfloat16)

    steps_per_epoch = int(train_sample_size / batch_size)
    steps_per_eval = ds_info.splits['test'].num_examples // test_batch_size
    train_dataset = strategy.experimental_distribute_dataset(train_dataset)
    test_datasets = {
        'clean': strategy.experimental_distribute_dataset(clean_test_dataset),
    }
    if FLAGS.corruptions_interval > 0:
        if FLAGS.dataset == 'cifar10':
            load_c_dataset = utils.load_cifar10_c
        else:
            load_c_dataset = functools.partial(utils.load_cifar100_c,
                                               path=FLAGS.cifar100_c_path)
        corruption_types, max_intensity = utils.load_corrupted_test_info(
            FLAGS.dataset)
        for corruption in corruption_types:
            for intensity in range(1, max_intensity + 1):
                dataset = load_c_dataset(corruption_name=corruption,
                                         corruption_intensity=intensity,
                                         batch_size=test_batch_size,
                                         use_bfloat16=FLAGS.use_bfloat16)
                test_datasets['{0}_{1}'.format(corruption, intensity)] = (
                    strategy.experimental_distribute_dataset(dataset))

    if FLAGS.use_bfloat16:
        policy = tf.keras.mixed_precision.experimental.Policy('mixed_bfloat16')
        tf.keras.mixed_precision.experimental.set_policy(policy)

    summary_writer = tf.summary.create_file_writer(
        os.path.join(FLAGS.output_dir, 'summaries'))

    with strategy.scope():
        logging.info('Building ResNet model')
        if FLAGS.use_spec_norm:
            logging.info('Use Spectral Normalization with norm bound %.2f',
                         FLAGS.spec_norm_bound)
        if FLAGS.use_gp_layer:
            logging.info('Use GP layer with hidden units %d',
                         FLAGS.gp_hidden_dim)

        model = ub.models.wide_resnet_sngp(
            input_shape=ds_info.features['image'].shape,
            batch_size=batch_size,
            depth=28,
            width_multiplier=10,
            num_classes=num_classes,
            l2=FLAGS.l2,
            use_mc_dropout=FLAGS.use_mc_dropout,
            use_filterwise_dropout=FLAGS.use_filterwise_dropout,
            dropout_rate=FLAGS.dropout_rate,
            use_gp_layer=FLAGS.use_gp_layer,
            gp_input_dim=FLAGS.gp_input_dim,
            gp_hidden_dim=FLAGS.gp_hidden_dim,
            gp_scale=FLAGS.gp_scale,
            gp_bias=FLAGS.gp_bias,
            gp_input_normalization=FLAGS.gp_input_normalization,
            gp_random_feature_type=FLAGS.gp_random_feature_type,
            gp_cov_discount_factor=FLAGS.gp_cov_discount_factor,
            gp_cov_ridge_penalty=FLAGS.gp_cov_ridge_penalty,
            use_spec_norm=FLAGS.use_spec_norm,
            spec_norm_iteration=FLAGS.spec_norm_iteration,
            spec_norm_bound=FLAGS.spec_norm_bound)
        logging.info('Model input shape: %s', model.input_shape)
        logging.info('Model output shape: %s', model.output_shape)
        logging.info('Model number of weights: %s', model.count_params())
        # Linearly scale learning rate and the decay epochs by vanilla settings.
        base_lr = FLAGS.base_learning_rate * batch_size / 128
        lr_decay_epochs = [(int(start_epoch_str) * FLAGS.train_epochs) // 200
                           for start_epoch_str in FLAGS.lr_decay_epochs]
        lr_schedule = utils.LearningRateSchedule(
            steps_per_epoch,
            base_lr,
            decay_ratio=FLAGS.lr_decay_ratio,
            decay_epochs=lr_decay_epochs,
            warmup_epochs=FLAGS.lr_warmup_epochs)
        optimizer = tf.keras.optimizers.SGD(lr_schedule,
                                            momentum=0.9,
                                            nesterov=True)
        metrics = {
            'train/negative_log_likelihood': tf.keras.metrics.Mean(),
            'train/accuracy': tf.keras.metrics.SparseCategoricalAccuracy(),
            'train/loss': tf.keras.metrics.Mean(),
            'train/ece': um.ExpectedCalibrationError(num_bins=FLAGS.num_bins),
            'test/negative_log_likelihood': tf.keras.metrics.Mean(),
            'test/accuracy': tf.keras.metrics.SparseCategoricalAccuracy(),
            'test/ece': um.ExpectedCalibrationError(num_bins=FLAGS.num_bins),
            'test/stddev': tf.keras.metrics.Mean(),
        }
        if use_validation_set:
            metrics.update({
                'val/negative_log_likelihood':
                tf.keras.metrics.Mean(),
                'val/accuracy':
                tf.keras.metrics.SparseCategoricalAccuracy(),
                'val/ece':
                um.ExpectedCalibrationError(num_bins=FLAGS.num_bins),
                'val/stddev':
                tf.keras.metrics.Mean(),
            })
        if FLAGS.corruptions_interval > 0:
            corrupt_metrics = {}
            for intensity in range(1, max_intensity + 1):
                for corruption in corruption_types:
                    dataset_name = '{0}_{1}'.format(corruption, intensity)
                    corrupt_metrics['test/nll_{}'.format(dataset_name)] = (
                        tf.keras.metrics.Mean())
                    corrupt_metrics['test/accuracy_{}'.format(
                        dataset_name)] = (
                            tf.keras.metrics.SparseCategoricalAccuracy())
                    corrupt_metrics['test/ece_{}'.format(dataset_name)] = (
                        um.ExpectedCalibrationError(num_bins=FLAGS.num_bins))
                    corrupt_metrics['test/stddev_{}'.format(dataset_name)] = (
                        tf.keras.metrics.Mean())

        checkpoint = tf.train.Checkpoint(model=model, optimizer=optimizer)
        latest_checkpoint = tf.train.latest_checkpoint(FLAGS.output_dir)
        initial_epoch = 0
        if latest_checkpoint:
            # checkpoint.restore must be within a strategy.scope() so that optimizer
            # slot variables are mirrored.
            checkpoint.restore(latest_checkpoint)
            logging.info('Loaded checkpoint %s', latest_checkpoint)
            initial_epoch = optimizer.iterations.numpy() // steps_per_epoch

    @tf.function
    def train_step(iterator, step):
        """Training StepFn."""
        def step_fn(inputs, step):
            """Per-Replica StepFn."""
            images, labels = inputs

            if tf.equal(step, 0) and FLAGS.gp_cov_discount_factor < 0:
                # Reset the covariance estimator at the beginning of a new epoch.
                model.layers[-1].reset_covariance_matrix()

            if FLAGS.augmix and FLAGS.aug_count >= 1:
                # Index 0 at augmix preprocessing is the unperturbed image,
                # so take the first augmented version at index 1.
                images = images[:, 1, ...]
                # This is for the case of combining AugMix and Mixup.
                if FLAGS.mixup_alpha > 0:
                    labels = tf.split(labels, FLAGS.aug_count + 1, axis=0)[1]
            images = tf.tile(images,
                             [FLAGS.num_dropout_samples_training, 1, 1, 1])
            if FLAGS.mixup_alpha > 0:
                labels = tf.tile(labels,
                                 [FLAGS.num_dropout_samples_training, 1])
            else:
                labels = tf.tile(labels, [FLAGS.num_dropout_samples_training])

            with tf.GradientTape() as tape:
                logits = model(images, training=True)
                if isinstance(logits, tuple):
                    # If model returns a tuple of (logits, covmat), extract logits
                    logits, _ = logits
                if FLAGS.use_bfloat16:
                    logits = tf.cast(logits, tf.float32)
                if FLAGS.mixup_alpha > 0:
                    negative_log_likelihood = tf.reduce_mean(
                        tf.keras.losses.categorical_crossentropy(
                            labels, logits, from_logits=True))
                else:
                    negative_log_likelihood = tf.reduce_mean(
                        tf.keras.losses.sparse_categorical_crossentropy(
                            labels, logits, from_logits=True))

                l2_loss = sum(model.losses)
                loss = negative_log_likelihood + l2_loss
                # Scale the loss, since the strategy sum-reduces gradients
                # across replicas.
                scaled_loss = loss / strategy.num_replicas_in_sync

            grads = tape.gradient(scaled_loss, model.trainable_variables)
            optimizer.apply_gradients(zip(grads, model.trainable_variables))

            probs = tf.nn.softmax(logits)
            if FLAGS.mixup_alpha > 0:
                labels = tf.argmax(labels, axis=-1)
            metrics['train/ece'].update_state(labels, probs)
            metrics['train/loss'].update_state(loss)
            metrics['train/negative_log_likelihood'].update_state(
                negative_log_likelihood)
            metrics['train/accuracy'].update_state(labels, logits)

        strategy.run(step_fn, args=(next(iterator), step))

    @tf.function
    def test_step(iterator, dataset_name):
        """Evaluation StepFn."""
        def step_fn(inputs):
            """Per-Replica StepFn."""
            images, labels = inputs

            logits_list = []
            stddev_list = []
            for _ in range(FLAGS.num_dropout_samples):
                logits = model(images, training=False)
                if isinstance(logits, tuple):
                    # If model returns a tuple of (logits, covmat), extract both
                    logits, covmat = logits
                else:
                    covmat = tf.eye(FLAGS.per_core_batch_size)
                if FLAGS.use_bfloat16:
                    logits = tf.cast(logits, tf.float32)
                logits = ed.layers.utils.mean_field_logits(
                    logits,
                    covmat,
                    mean_field_factor=FLAGS.gp_mean_field_factor)
                stddev = tf.sqrt(tf.linalg.diag_part(covmat))

                stddev_list.append(stddev)
                logits_list.append(logits)

            # Logits dimension is (num_samples, batch_size, num_classes).
            logits_list = tf.stack(logits_list, axis=0)
            stddev_list = tf.stack(stddev_list, axis=0)

            stddev = tf.reduce_mean(stddev_list, axis=0)
            probs_list = tf.nn.softmax(logits_list)
            probs = tf.reduce_mean(probs_list, axis=0)

            labels_broadcasted = tf.broadcast_to(
                labels, [FLAGS.num_dropout_samples, labels.shape[0]])
            log_likelihoods = -tf.keras.losses.sparse_categorical_crossentropy(
                labels_broadcasted, logits_list, from_logits=True)
            negative_log_likelihood = tf.reduce_mean(
                -tf.reduce_logsumexp(log_likelihoods, axis=[0]) +
                tf.math.log(float(FLAGS.num_dropout_samples)))

            if dataset_name == 'clean':
                metrics['test/negative_log_likelihood'].update_state(
                    negative_log_likelihood)
                metrics['test/accuracy'].update_state(labels, probs)
                metrics['test/ece'].update_state(labels, probs)
                metrics['test/stddev'].update_state(stddev)
            elif dataset_name == 'val':
                metrics['val/negative_log_likelihood'].update_state(
                    negative_log_likelihood)
                metrics['val/accuracy'].update_state(labels, probs)
                metrics['val/ece'].update_state(labels, probs)
                metrics['val/stddev'].update_state(stddev)
            else:
                corrupt_metrics['test/nll_{}'.format(
                    dataset_name)].update_state(negative_log_likelihood)
                corrupt_metrics['test/accuracy_{}'.format(
                    dataset_name)].update_state(labels, probs)
                corrupt_metrics['test/ece_{}'.format(
                    dataset_name)].update_state(labels, probs)
                corrupt_metrics['test/stddev_{}'.format(
                    dataset_name)].update_state(stddev)

        strategy.run(step_fn, args=(next(iterator), ))

    metrics.update({'test/ms_per_example': tf.keras.metrics.Mean()})

    step_variable = tf.Variable(0, dtype=tf.int32)
    train_iterator = iter(train_dataset)
    start_time = time.time()

    for epoch in range(initial_epoch, FLAGS.train_epochs):
        logging.info('Starting to run epoch: %s', epoch)
        for step in range(steps_per_epoch):
            step_variable.assign(step)
            # Pass `step` as a tf.Variable so tf.function does not retrace
            # train_step on every call.
            train_step(train_iterator, step_variable)

            current_step = epoch * steps_per_epoch + (step + 1)
            max_steps = steps_per_epoch * FLAGS.train_epochs
            time_elapsed = time.time() - start_time
            steps_per_sec = float(current_step) / time_elapsed
            eta_seconds = (max_steps - current_step) / steps_per_sec
            message = ('{:.1%} completion: epoch {:d}/{:d}. {:.1f} steps/s. '
                       'ETA: {:.0f} min. Time elapsed: {:.0f} min'.format(
                           current_step / max_steps, epoch + 1,
                           FLAGS.train_epochs, steps_per_sec, eta_seconds / 60,
                           time_elapsed / 60))
            if step % 20 == 0:
                logging.info(message)

        datasets_to_evaluate = {'clean': test_datasets['clean']}
        if use_validation_set:
            datasets_to_evaluate['val'] = validation_dataset
        if (FLAGS.corruptions_interval > 0
                and (epoch + 1) % FLAGS.corruptions_interval == 0):
            datasets_to_evaluate = test_datasets
        for dataset_name, test_dataset in datasets_to_evaluate.items():
            test_iterator = iter(test_dataset)
            logging.info('Testing on dataset %s', dataset_name)
            steps = steps_per_val if dataset_name == 'val' else steps_per_eval
            for step in range(steps):
                if step % 20 == 0:
                    logging.info('Starting to run eval step %s of epoch: %s',
                                 step, epoch)
                test_start_time = time.time()
                test_step(test_iterator, dataset_name)
                ms_per_example = (time.time() -
                                  test_start_time) * 1e3 / test_batch_size
                metrics['test/ms_per_example'].update_state(ms_per_example)

            logging.info('Done with testing on %s', dataset_name)

        corrupt_results = {}
        if (FLAGS.corruptions_interval > 0
                and (epoch + 1) % FLAGS.corruptions_interval == 0):
            corrupt_results = utils.aggregate_corrupt_metrics(
                corrupt_metrics, corruption_types, max_intensity)

        logging.info('Train Loss: %.4f, Accuracy: %.2f%%',
                     metrics['train/loss'].result(),
                     metrics['train/accuracy'].result() * 100)
        if use_validation_set:
            logging.info('Val NLL: %.4f, Accuracy: %.2f%%',
                         metrics['val/negative_log_likelihood'].result(),
                         metrics['val/accuracy'].result() * 100)
        logging.info('Test NLL: %.4f, Accuracy: %.2f%%',
                     metrics['test/negative_log_likelihood'].result(),
                     metrics['test/accuracy'].result() * 100)
        total_results = {
            name: metric.result()
            for name, metric in metrics.items()
        }
        total_results.update(corrupt_results)
        with summary_writer.as_default():
            for name, result in total_results.items():
                tf.summary.scalar(name, result, step=epoch + 1)

        for metric in metrics.values():
            metric.reset_states()

        if (FLAGS.checkpoint_interval > 0
                and (epoch + 1) % FLAGS.checkpoint_interval == 0):
            checkpoint_name = checkpoint.save(
                os.path.join(FLAGS.output_dir, 'checkpoint'))
            logging.info('Saved checkpoint to %s', checkpoint_name)

    final_checkpoint_name = checkpoint.save(
        os.path.join(FLAGS.output_dir, 'checkpoint'))
    logging.info('Saved last checkpoint to %s', final_checkpoint_name)

    final_save_name = os.path.join(FLAGS.output_dir, 'model')
    model.save(final_save_name)
    logging.info('Saved model to %s', final_save_name)
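
# The test step above calls ed.layers.utils.mean_field_logits. A minimal
# sketch of the mean-field adjustment it is assumed to perform (scaling the
# logits by the GP posterior variance; assumes the same `tf` import as the
# surrounding examples):
def mean_field_logits_sketch(logits, covmat, mean_field_factor=1.):
    """Sketch: divide logits by sqrt(1 + lambda * variance) so the softmax
    probabilities account for the predictive variance."""
    variances = tf.linalg.diag_part(covmat)  # per-example posterior variance
    logits_scale = tf.sqrt(1. + variances * mean_field_factor)
    return logits / tf.expand_dims(logits_scale, axis=-1)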
Example #3
def main(argv):
    del argv  # unused arg
    if not FLAGS.use_gpu:
        raise ValueError('Only GPU is currently supported.')
    if FLAGS.num_cores > 1:
        raise ValueError('Only a single accelerator is currently supported.')
    tf.random.set_seed(FLAGS.seed)
    tf.io.gfile.makedirs(FLAGS.output_dir)

    ds_info = tfds.builder(FLAGS.dataset).info
    batch_size = FLAGS.per_core_batch_size * FLAGS.num_cores
    steps_per_eval = ds_info.splits['test'].num_examples // batch_size
    num_classes = ds_info.features['label'].num_classes

    dataset = ub.datasets.get(
        FLAGS.dataset,
        download_data=FLAGS.download_data,
        split=tfds.Split.TEST).load(batch_size=batch_size)
    test_datasets = {'clean': dataset}
    data_dir = None
    if FLAGS.dataset == 'cifar100':
        data_dir = FLAGS.cifar100_c_path
    corruption_types, _ = utils.load_corrupted_test_info(FLAGS.dataset)
    for corruption_type in corruption_types:
        for severity in range(1, 6):
            dataset = ub.datasets.get(
                f'{FLAGS.dataset}_corrupted',
                corruption_type=corruption_type,
                data_dir=data_dir,
                download_data=FLAGS.download_data,
                severity=severity,
                split=tfds.Split.TEST).load(batch_size=batch_size)
            test_datasets[f'{corruption_type}_{severity}'] = dataset

    model = ub.models.wide_resnet_heteroscedastic(
        input_shape=ds_info.features['image'].shape,
        depth=28,
        width_multiplier=10,
        num_classes=num_classes,
        l2=0.,
        version=2,
        temperature=FLAGS.temperature,
        num_factors=FLAGS.num_factors,
        num_mc_samples=FLAGS.num_mc_samples)
    logging.info('Model input shape: %s', model.input_shape)
    logging.info('Model output shape: %s', model.output_shape)
    logging.info('Model number of weights: %s', model.count_params())

    # Search for checkpoints from their index file; then remove the index suffix.
    ensemble_filenames = parse_checkpoint_dir(FLAGS.checkpoint_dir)
    ensemble_size = len(ensemble_filenames)
    logging.info('Ensemble size: %s', ensemble_size)
    logging.info('Ensemble number of weights: %s',
                 ensemble_size * model.count_params())
    logging.info('Ensemble filenames: %s', str(ensemble_filenames))
    checkpoint = tf.train.Checkpoint(model=model)

    # Write model predictions to files.
    num_datasets = len(test_datasets)
    for m, ensemble_filename in enumerate(ensemble_filenames):
        checkpoint.restore(ensemble_filename)
        for n, (name, test_dataset) in enumerate(test_datasets.items()):
            filename = '{dataset}_{member}.npy'.format(dataset=name, member=m)
            filename = os.path.join(FLAGS.output_dir, filename)
            if not tf.io.gfile.exists(filename):
                logits = []
                test_iterator = iter(test_dataset)
                for _ in range(steps_per_eval):
                    features = next(test_iterator)['features']  # pytype: disable=unsupported-operands
                    logits.append(model(features, training=False))

                logits = tf.concat(logits, axis=0)
                with tf.io.gfile.GFile(filename, 'w') as f:
                    np.save(f, logits.numpy())
            percent = (m * num_datasets +
                       (n + 1)) / (ensemble_size * num_datasets)
            message = (
                '{:.1%} completion for prediction: ensemble member {:d}/{:d}. '
                'Dataset {:d}/{:d}'.format(percent, m + 1, ensemble_size,
                                           n + 1, num_datasets))
            logging.info(message)

    metrics = {
        'test/negative_log_likelihood': tf.keras.metrics.Mean(),
        'test/gibbs_cross_entropy': tf.keras.metrics.Mean(),
        'test/accuracy': tf.keras.metrics.SparseCategoricalAccuracy(),
        'test/ece':
        rm.metrics.ExpectedCalibrationError(num_bins=FLAGS.num_bins),
    }
    corrupt_metrics = {}
    for name in test_datasets:
        corrupt_metrics['test/nll_{}'.format(name)] = tf.keras.metrics.Mean()
        corrupt_metrics['test/accuracy_{}'.format(name)] = (
            tf.keras.metrics.SparseCategoricalAccuracy())
        corrupt_metrics['test/ece_{}'.format(name)] = (
            rm.metrics.ExpectedCalibrationError(num_bins=FLAGS.num_bins))
    for i in range(ensemble_size):
        metrics['test/nll_member_{}'.format(i)] = tf.keras.metrics.Mean()
        metrics['test/accuracy_member_{}'.format(i)] = (
            tf.keras.metrics.SparseCategoricalAccuracy())
    test_diversity = {
        'test/disagreement': tf.keras.metrics.Mean(),
        'test/average_kl': tf.keras.metrics.Mean(),
        'test/cosine_similarity': tf.keras.metrics.Mean(),
    }
    metrics.update(test_diversity)

    # Evaluate model predictions.
    for n, (name, test_dataset) in enumerate(test_datasets.items()):
        logits_dataset = []
        for m in range(ensemble_size):
            filename = '{dataset}_{member}.npy'.format(dataset=name, member=m)
            filename = os.path.join(FLAGS.output_dir, filename)
            with tf.io.gfile.GFile(filename, 'rb') as f:
                logits_dataset.append(np.load(f))

        logits_dataset = tf.convert_to_tensor(logits_dataset)
        test_iterator = iter(test_dataset)
        for step in range(steps_per_eval):
            labels = next(test_iterator)['labels']  # pytype: disable=unsupported-operands
            logits = logits_dataset[:, (step * batch_size):((step + 1) *
                                                            batch_size)]
            labels = tf.cast(labels, tf.int32)
            negative_log_likelihood_metric = rm.metrics.EnsembleCrossEntropy()
            negative_log_likelihood_metric.add_batch(logits, labels=labels)
            negative_log_likelihood = list(
                negative_log_likelihood_metric.result().values())[0]
            per_probs = tf.nn.softmax(logits)
            probs = tf.reduce_mean(per_probs, axis=0)
            if name == 'clean':
                gibbs_ce_metric = rm.metrics.GibbsCrossEntropy()
                gibbs_ce_metric.add_batch(logits, labels=labels)
                gibbs_ce = list(gibbs_ce_metric.result().values())[0]
                metrics['test/negative_log_likelihood'].update_state(
                    negative_log_likelihood)
                metrics['test/gibbs_cross_entropy'].update_state(gibbs_ce)
                metrics['test/accuracy'].update_state(labels, probs)
                metrics['test/ece'].add_batch(probs, label=labels)

                for i in range(ensemble_size):
                    member_probs = per_probs[i]
                    member_loss = tf.keras.losses.sparse_categorical_crossentropy(
                        labels, member_probs)
                    metrics['test/nll_member_{}'.format(i)].update_state(
                        member_loss)
                    metrics['test/accuracy_member_{}'.format(i)].update_state(
                        labels, member_probs)
                diversity = rm.metrics.AveragePairwiseDiversity()
                diversity.add_batch(per_probs, num_models=ensemble_size)
                diversity_results = diversity.result()
                for k, v in diversity_results.items():
                    test_diversity['test/' + k].update_state(v)
            else:
                corrupt_metrics['test/nll_{}'.format(name)].update_state(
                    negative_log_likelihood)
                corrupt_metrics['test/accuracy_{}'.format(name)].update_state(
                    labels, probs)
                corrupt_metrics['test/ece_{}'.format(name)].add_batch(
                    probs, label=labels)

        message = (
            '{:.1%} completion for evaluation: dataset {:d}/{:d}'.format(
                (n + 1) / num_datasets, n + 1, num_datasets))
        logging.info(message)

    corrupt_results = utils.aggregate_corrupt_metrics(corrupt_metrics,
                                                      corruption_types)
    total_results = {name: metric.result() for name, metric in metrics.items()}
    total_results.update(corrupt_results)
    # Metrics from Robustness Metrics (like ECE) will return a dict with a
    # single key/value, instead of a scalar.
    total_results = {
        k: (list(v.values())[0] if isinstance(v, dict) else v)
        for k, v in total_results.items()
    }
    logging.info('Metrics: %s', total_results)
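
# rm.metrics.EnsembleCrossEntropy above computes the ensemble's negative log
# marginal likelihood, -log((1/M) * sum_m p_m(y|x)). A numerically stable
# sketch from per-member logits, analogous to the logsumexp formulation in
# Examples #2 and #4 (not the Robustness Metrics implementation itself):
def ensemble_nll_sketch(labels, logits):
    """Sketch: ensemble NLL for logits of shape
    [ensemble_size, batch_size, num_classes]."""
    ensemble_size = tf.shape(logits)[0]
    labels_broadcasted = tf.broadcast_to(
        labels, [ensemble_size, tf.shape(labels)[0]])
    # Per-member log-likelihoods, shape [ensemble_size, batch_size].
    log_likelihoods = -tf.keras.losses.sparse_categorical_crossentropy(
        labels_broadcasted, logits, from_logits=True)
    # -log(mean_m exp(log p_m)), computed via logsumexp for stability.
    return tf.reduce_mean(
        -tf.reduce_logsumexp(log_likelihoods, axis=0) +
        tf.math.log(tf.cast(ensemble_size, tf.float32)))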
Example #4
def main(argv):
    del argv  # unused arg
    tf.io.gfile.makedirs(FLAGS.output_dir)
    logging.info('Saving checkpoints at %s', FLAGS.output_dir)
    tf.random.set_seed(FLAGS.seed)

    if FLAGS.use_gpu:
        logging.info('Use GPU')
        strategy = tf.distribute.MirroredStrategy()
    else:
        logging.info('Use TPU at %s',
                     FLAGS.tpu if FLAGS.tpu is not None else 'local')
        resolver = tf.distribute.cluster_resolver.TPUClusterResolver(
            tpu=FLAGS.tpu)
        tf.config.experimental_connect_to_cluster(resolver)
        tf.tpu.experimental.initialize_tpu_system(resolver)
        strategy = tf.distribute.experimental.TPUStrategy(resolver)

    train_input_fn = utils.load_input_fn(
        split=tfds.Split.TRAIN,
        name=FLAGS.dataset,
        batch_size=FLAGS.per_core_batch_size // FLAGS.ensemble_size,
        use_bfloat16=FLAGS.use_bfloat16)
    clean_test_input_fn = utils.load_input_fn(
        split=tfds.Split.TEST,
        name=FLAGS.dataset,
        batch_size=FLAGS.per_core_batch_size // FLAGS.ensemble_size,
        use_bfloat16=FLAGS.use_bfloat16)
    train_dataset = strategy.experimental_distribute_datasets_from_function(
        train_input_fn)
    test_datasets = {
        'clean':
        strategy.experimental_distribute_datasets_from_function(
            clean_test_input_fn),
    }
    if FLAGS.corruptions_interval > 0:
        if FLAGS.dataset == 'cifar10':
            load_c_input_fn = utils.load_cifar10_c_input_fn
        else:
            load_c_input_fn = functools.partial(utils.load_cifar100_c_input_fn,
                                                path=FLAGS.cifar100_c_path)
        corruption_types, max_intensity = utils.load_corrupted_test_info(
            FLAGS.dataset)
        for corruption in corruption_types:
            for intensity in range(1, max_intensity + 1):
                input_fn = load_c_input_fn(
                    corruption_name=corruption,
                    corruption_intensity=intensity,
                    batch_size=FLAGS.per_core_batch_size //
                    FLAGS.ensemble_size,
                    use_bfloat16=FLAGS.use_bfloat16)
                test_datasets['{0}_{1}'.format(corruption, intensity)] = (
                    strategy.experimental_distribute_datasets_from_function(
                        input_fn))

    ds_info = tfds.builder(FLAGS.dataset).info
    batch_size = ((FLAGS.per_core_batch_size // FLAGS.ensemble_size) *
                  FLAGS.num_cores)
    train_dataset_size = ds_info.splits['train'].num_examples
    steps_per_epoch = train_dataset_size // batch_size
    test_dataset_size = ds_info.splits['test'].num_examples
    steps_per_eval = test_dataset_size // batch_size
    num_classes = ds_info.features['label'].num_classes

    if FLAGS.use_bfloat16:
        policy = tf.keras.mixed_precision.experimental.Policy('mixed_bfloat16')
        tf.keras.mixed_precision.experimental.set_policy(policy)

    summary_writer = tf.summary.create_file_writer(
        os.path.join(FLAGS.output_dir, 'summaries'))

    with strategy.scope():
        logging.info('Building Keras model')
        model = ub.models.wide_resnet_rank1(
            input_shape=ds_info.features['image'].shape,
            depth=28,
            width_multiplier=10,
            num_classes=num_classes,
            alpha_initializer=FLAGS.alpha_initializer,
            gamma_initializer=FLAGS.gamma_initializer,
            alpha_regularizer=FLAGS.alpha_regularizer,
            gamma_regularizer=FLAGS.gamma_regularizer,
            use_additive_perturbation=FLAGS.use_additive_perturbation,
            ensemble_size=FLAGS.ensemble_size,
            random_sign_init=FLAGS.random_sign_init,
            dropout_rate=FLAGS.dropout_rate,
            prior_mean=FLAGS.prior_mean,
            prior_stddev=FLAGS.prior_stddev)
        logging.info('Model input shape: %s', model.input_shape)
        logging.info('Model output shape: %s', model.output_shape)
        logging.info('Model number of weights: %s', model.count_params())
        # Linearly scale learning rate and the decay epochs by vanilla settings.
        base_lr = FLAGS.base_learning_rate * batch_size / 128
        lr_decay_epochs = [(int(start_epoch_str) * FLAGS.train_epochs) // 200
                           for start_epoch_str in FLAGS.lr_decay_epochs]
        lr_schedule = utils.LearningRateSchedule(
            steps_per_epoch,
            base_lr,
            decay_ratio=FLAGS.lr_decay_ratio,
            decay_epochs=lr_decay_epochs,
            warmup_epochs=FLAGS.lr_warmup_epochs)
        optimizer = tf.keras.optimizers.SGD(lr_schedule,
                                            momentum=0.9,
                                            nesterov=True)
        metrics = {
            'train/negative_log_likelihood': tf.keras.metrics.Mean(),
            'train/kl': tf.keras.metrics.Mean(),
            'train/kl_scale': tf.keras.metrics.Mean(),
            'train/elbo': tf.keras.metrics.Mean(),
            'train/loss': tf.keras.metrics.Mean(),
            'train/accuracy': tf.keras.metrics.SparseCategoricalAccuracy(),
            'train/ece': um.ExpectedCalibrationError(num_bins=FLAGS.num_bins),
            'test/negative_log_likelihood': tf.keras.metrics.Mean(),
            'test/kl': tf.keras.metrics.Mean(),
            'test/elbo': tf.keras.metrics.Mean(),
            'test/accuracy': tf.keras.metrics.SparseCategoricalAccuracy(),
            'test/ece': um.ExpectedCalibrationError(num_bins=FLAGS.num_bins),
        }
        if FLAGS.ensemble_size > 1:
            for i in range(FLAGS.ensemble_size):
                metrics['test/nll_member_{}'.format(
                    i)] = tf.keras.metrics.Mean()
                metrics['test/accuracy_member_{}'.format(i)] = (
                    tf.keras.metrics.SparseCategoricalAccuracy())
        if FLAGS.corruptions_interval > 0:
            corrupt_metrics = {}
            for intensity in range(1, max_intensity + 1):
                for corruption in corruption_types:
                    dataset_name = '{0}_{1}'.format(corruption, intensity)
                    corrupt_metrics['test/nll_{}'.format(dataset_name)] = (
                        tf.keras.metrics.Mean())
                    corrupt_metrics['test/kl_{}'.format(dataset_name)] = (
                        tf.keras.metrics.Mean())
                    corrupt_metrics['test/elbo_{}'.format(dataset_name)] = (
                        tf.keras.metrics.Mean())
                    corrupt_metrics['test/accuracy_{}'.format(
                        dataset_name)] = (
                            tf.keras.metrics.SparseCategoricalAccuracy())
                    corrupt_metrics['test/ece_{}'.format(dataset_name)] = (
                        um.ExpectedCalibrationError(num_bins=FLAGS.num_bins))

        checkpoint = tf.train.Checkpoint(model=model, optimizer=optimizer)
        latest_checkpoint = tf.train.latest_checkpoint(FLAGS.output_dir)
        initial_epoch = 0
        if latest_checkpoint:
            # checkpoint.restore must be within a strategy.scope() so that optimizer
            # slot variables are mirrored.
            checkpoint.restore(latest_checkpoint)
            logging.info('Loaded checkpoint %s', latest_checkpoint)
            initial_epoch = optimizer.iterations.numpy() // steps_per_epoch

    def compute_l2_loss(model):
        filtered_variables = []
        for var in model.trainable_variables:
            # Apply l2 regularization to the kernel, batch norm, and bias
            # parameters. This excludes only the fast-weight approximate
            # posterior/prior parameters; be careful with their naming scheme.
            if ('kernel' in var.name or 'batch_norm' in var.name
                    or 'bias' in var.name):
                filtered_variables.append(tf.reshape(var, (-1, )))
        l2_loss = FLAGS.l2 * 2 * tf.nn.l2_loss(
            tf.concat(filtered_variables, axis=0))
        return l2_loss

    @tf.function
    def train_step(iterator):
        """Training StepFn."""
        def step_fn(inputs):
            """Per-Replica StepFn."""
            images, labels = inputs
            if FLAGS.ensemble_size > 1:
                images = tf.tile(images, [FLAGS.ensemble_size, 1, 1, 1])
                labels = tf.tile(labels, [FLAGS.ensemble_size])

            with tf.GradientTape() as tape:
                logits = model(images, training=True)
                if FLAGS.use_bfloat16:
                    logits = tf.cast(logits, tf.float32)
                negative_log_likelihood = tf.reduce_mean(
                    tf.keras.losses.sparse_categorical_crossentropy(
                        labels, logits, from_logits=True))
                l2_loss = compute_l2_loss(model)
                kl = sum(model.losses) / train_dataset_size
                kl_scale = tf.cast(optimizer.iterations + 1, kl.dtype)
                kl_scale /= steps_per_epoch * FLAGS.kl_annealing_epochs
                kl_scale = tf.minimum(1., kl_scale)
                kl_loss = kl_scale * kl

                # Scale the loss, since the strategy sum-reduces gradients
                # across replicas.
                loss = negative_log_likelihood + l2_loss + kl_loss
                scaled_loss = loss / strategy.num_replicas_in_sync
                elbo = -(negative_log_likelihood + l2_loss + kl)

            grads = tape.gradient(scaled_loss, model.trainable_variables)

            # Separate learning rate implementation.
            if FLAGS.fast_weight_lr_multiplier != 1.0:
                grads_and_vars = []
                for grad, var in zip(grads, model.trainable_variables):
                    # Apply a different learning rate to the fast-weight
                    # approximate posterior/prior parameters. This excludes BN
                    # and slow weights; be careful with the naming scheme.
                    if ('kernel' not in var.name
                            and 'batch_norm' not in var.name
                            and 'bias' not in var.name):
                        grads_and_vars.append(
                            (grad * FLAGS.fast_weight_lr_multiplier, var))
                    else:
                        grads_and_vars.append((grad, var))
                optimizer.apply_gradients(grads_and_vars)
            else:
                optimizer.apply_gradients(zip(grads,
                                              model.trainable_variables))

            probs = tf.nn.softmax(logits)
            metrics['train/negative_log_likelihood'].update_state(
                negative_log_likelihood)
            metrics['train/kl'].update_state(kl)
            metrics['train/kl_scale'].update_state(kl_scale)
            metrics['train/elbo'].update_state(elbo)
            metrics['train/loss'].update_state(loss)
            metrics['train/accuracy'].update_state(labels, probs)
            metrics['train/ece'].update_state(labels, probs)

        strategy.run(step_fn, args=(next(iterator), ))

    @tf.function
    def test_step(iterator, dataset_name):
        """Evaluation StepFn."""
        def step_fn(inputs):
            """Per-Replica StepFn."""
            images, labels = inputs
            if FLAGS.ensemble_size > 1:
                images = tf.tile(images, [FLAGS.ensemble_size, 1, 1, 1])
            logits = tf.reshape([
                model(images, training=False)
                for _ in range(FLAGS.num_eval_samples)
            ], [FLAGS.num_eval_samples, FLAGS.ensemble_size, -1, num_classes])
            if FLAGS.use_bfloat16:
                logits = tf.cast(logits, tf.float32)
            probs = tf.nn.softmax(logits)

            if FLAGS.ensemble_size > 1:
                per_probs = tf.reduce_mean(probs,
                                           axis=0)  # marginalize samples
                for i in range(FLAGS.ensemble_size):
                    member_probs = per_probs[i]
                    member_loss = tf.keras.losses.sparse_categorical_crossentropy(
                        labels, member_probs)
                    metrics['test/nll_member_{}'.format(i)].update_state(
                        member_loss)
                    metrics['test/accuracy_member_{}'.format(i)].update_state(
                        labels, member_probs)

            # Negative log marginal likelihood computed in a numerically-stable way.
            labels_broadcasted = tf.broadcast_to(
                labels,
                [FLAGS.num_eval_samples, FLAGS.ensemble_size, labels.shape[0]])
            log_likelihoods = -tf.keras.losses.sparse_categorical_crossentropy(
                labels_broadcasted, logits, from_logits=True)
            negative_log_likelihood = tf.reduce_mean(
                -tf.reduce_logsumexp(log_likelihoods, axis=[0, 1]) +
                tf.math.log(float(FLAGS.num_eval_samples *
                                  FLAGS.ensemble_size)))
            probs = tf.math.reduce_mean(probs, axis=[0, 1])  # marginalize

            l2_loss = compute_l2_loss(model)
            kl = sum(model.losses) / test_dataset_size
            elbo = -(negative_log_likelihood + l2_loss + kl)

            if dataset_name == 'clean':
                metrics['test/negative_log_likelihood'].update_state(
                    negative_log_likelihood)
                metrics['test/kl'].update_state(kl)
                metrics['test/elbo'].update_state(elbo)
                metrics['test/accuracy'].update_state(labels, probs)
                metrics['test/ece'].update_state(labels, probs)
            else:
                corrupt_metrics['test/nll_{}'.format(
                    dataset_name)].update_state(negative_log_likelihood)
                corrupt_metrics['test/kl_{}'.format(
                    dataset_name)].update_state(kl)
                corrupt_metrics['test/elbo_{}'.format(
                    dataset_name)].update_state(elbo)
                corrupt_metrics['test/accuracy_{}'.format(
                    dataset_name)].update_state(labels, probs)
                corrupt_metrics['test/ece_{}'.format(
                    dataset_name)].update_state(labels, probs)

        strategy.run(step_fn, args=(next(iterator), ))

    train_iterator = iter(train_dataset)
    start_time = time.time()
    for epoch in range(initial_epoch, FLAGS.train_epochs):
        logging.info('Starting to run epoch: %s', epoch)

        for step in range(steps_per_epoch):
            train_step(train_iterator)

            current_step = epoch * steps_per_epoch + (step + 1)
            max_steps = steps_per_epoch * FLAGS.train_epochs
            time_elapsed = time.time() - start_time
            steps_per_sec = float(current_step) / time_elapsed
            eta_seconds = (max_steps - current_step) / steps_per_sec
            message = ('{:.1%} completion: epoch {:d}/{:d}. {:.1f} steps/s. '
                       'ETA: {:.0f} min. Time elapsed: {:.0f} min'.format(
                           current_step / max_steps, epoch + 1,
                           FLAGS.train_epochs, steps_per_sec, eta_seconds / 60,
                           time_elapsed / 60))
            if step % 20 == 0:
                logging.info(message)

        datasets_to_evaluate = {'clean': test_datasets['clean']}
        if (FLAGS.corruptions_interval > 0
                and (epoch + 1) % FLAGS.corruptions_interval == 0):
            datasets_to_evaluate = test_datasets
        for dataset_name, test_dataset in datasets_to_evaluate.items():
            test_iterator = iter(test_dataset)
            logging.info('Testing on dataset %s', dataset_name)
            for step in range(steps_per_eval):
                if step % 20 == 0:
                    logging.info('Starting to run eval step %s of epoch: %s',
                                 step, epoch)
                test_step(test_iterator, dataset_name)
            logging.info('Done with testing on %s', dataset_name)

        corrupt_results = {}
        if (FLAGS.corruptions_interval > 0
                and (epoch + 1) % FLAGS.corruptions_interval == 0):
            corrupt_results = utils.aggregate_corrupt_metrics(
                corrupt_metrics, corruption_types, max_intensity)

        logging.info('Train Loss: %.4f, Accuracy: %.2f%%',
                     metrics['train/loss'].result(),
                     metrics['train/accuracy'].result() * 100)
        logging.info('Test NLL: %.4f, Accuracy: %.2f%%',
                     metrics['test/negative_log_likelihood'].result(),
                     metrics['test/accuracy'].result() * 100)
        if FLAGS.ensemble_size > 1:
            for i in range(FLAGS.ensemble_size):
                logging.info(
                    'Member %d Test Loss: %.4f, Accuracy: %.2f%%', i,
                    metrics['test/nll_member_{}'.format(i)].result(),
                    metrics['test/accuracy_member_{}'.format(i)].result() *
                    100)
        total_results = {
            name: metric.result()
            for name, metric in metrics.items()
        }
        total_results.update(corrupt_results)
        with summary_writer.as_default():
            for name, result in total_results.items():
                tf.summary.scalar(name, result, step=epoch + 1)

        for metric in metrics.values():
            metric.reset_states()

        if (FLAGS.checkpoint_interval > 0
                and (epoch + 1) % FLAGS.checkpoint_interval == 0):
            checkpoint_name = checkpoint.save(
                os.path.join(FLAGS.output_dir, 'checkpoint'))
            logging.info('Saved checkpoint to %s', checkpoint_name)

    final_checkpoint_name = checkpoint.save(
        os.path.join(FLAGS.output_dir, 'checkpoint'))
    logging.info('Saved last checkpoint to %s', final_checkpoint_name)
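
# The train step above anneals the KL term linearly over
# FLAGS.kl_annealing_epochs. The schedule, isolated as a sketch (the helper
# name is hypothetical; the logic mirrors the kl_scale computation in
# train_step):
def kl_scale_sketch(step, steps_per_epoch, kl_annealing_epochs):
    """Sketch: the KL weight ramps linearly from ~0 to 1 over
    `kl_annealing_epochs` epochs, then stays at 1."""
    scale = tf.cast(step + 1, tf.float32) / float(
        steps_per_epoch * kl_annealing_epochs)
    return tf.minimum(1., scale)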
Example #5
def main(argv):
  del argv  # unused arg
  if not FLAGS.use_gpu:
    raise ValueError('Only GPU is currently supported.')
  if FLAGS.num_cores > 1:
    raise ValueError('Only a single accelerator is currently supported.')
  tf.random.set_seed(FLAGS.seed)
  tf.io.gfile.makedirs(FLAGS.output_dir)

  batch_size = FLAGS.per_core_batch_size * FLAGS.num_cores
  steps_per_eval = IMAGENET_VALIDATION_IMAGES // batch_size

  builder = utils.ImageNetInput(data_dir=FLAGS.data_dir,
                                use_bfloat16=False)
  clean_test_dataset = builder.as_dataset(split=tfds.Split.TEST,
                                          batch_size=batch_size)
  test_datasets = {'clean': clean_test_dataset}
  corruption_types, max_intensity = utils.load_corrupted_test_info()
  for name in corruption_types:
    for intensity in range(1, max_intensity + 1):
      dataset_name = '{0}_{1}'.format(name, intensity)
      test_datasets[dataset_name] = utils.load_corrupted_test_dataset(
          corruption_name=name,
          corruption_intensity=intensity,
          batch_size=batch_size,
          drop_remainder=True,
          use_bfloat16=False)

  model = ub.models.resnet50_deterministic(input_shape=(224, 224, 3),
                                           num_classes=NUM_CLASSES)

  logging.info('Model input shape: %s', model.input_shape)
  logging.info('Model output shape: %s', model.output_shape)
  logging.info('Model number of weights: %s', model.count_params())
  # Search for checkpoints from their index file; then remove the index suffix.
  ensemble_filenames = tf.io.gfile.glob(os.path.join(FLAGS.checkpoint_dir,
                                                     '**/*.index'))
  ensemble_filenames = [filename[:-6] for filename in ensemble_filenames]
  ensemble_size = len(ensemble_filenames)
  logging.info('Ensemble size: %s', ensemble_size)
  logging.info('Ensemble number of weights: %s',
               ensemble_size * model.count_params())
  logging.info('Ensemble filenames: %s', str(ensemble_filenames))
  checkpoint = tf.train.Checkpoint(model=model)

  # Write model predictions to files.
  num_datasets = len(test_datasets)
  for m, ensemble_filename in enumerate(ensemble_filenames):
    checkpoint.restore(ensemble_filename)
    for n, (name, test_dataset) in enumerate(test_datasets.items()):
      filename = '{dataset}_{member}.npy'.format(dataset=name, member=m)
      filename = os.path.join(FLAGS.output_dir, filename)
      if not tf.io.gfile.exists(filename):
        logits = []
        test_iterator = iter(test_dataset)
        for _ in range(steps_per_eval):
          features, _ = next(test_iterator)  # pytype: disable=attribute-error
          logits.append(model(features, training=False))

        logits = tf.concat(logits, axis=0)
        with tf.io.gfile.GFile(filename, 'w') as f:
          np.save(f, logits.numpy())
      percent = (m * num_datasets + (n + 1)) / (ensemble_size * num_datasets)
      message = ('{:.1%} completion for prediction: ensemble member {:d}/{:d}. '
                 'Dataset {:d}/{:d}'.format(percent,
                                            m + 1,
                                            ensemble_size,
                                            n + 1,
                                            num_datasets))
      logging.info(message)

  metrics = {
      'test/negative_log_likelihood': tf.keras.metrics.Mean(),
      'test/gibbs_cross_entropy': tf.keras.metrics.Mean(),
      'test/accuracy': tf.keras.metrics.SparseCategoricalAccuracy(),
      'test/ece': um.ExpectedCalibrationError(num_bins=FLAGS.num_bins),
  }
  corrupt_metrics = {}
  for name in test_datasets:
    corrupt_metrics['test/nll_{}'.format(name)] = tf.keras.metrics.Mean()
    corrupt_metrics['test/accuracy_{}'.format(name)] = (
        tf.keras.metrics.SparseCategoricalAccuracy())
    corrupt_metrics['test/ece_{}'.format(
        name)] = um.ExpectedCalibrationError(num_bins=FLAGS.num_bins)

  # Evaluate model predictions.
  for n, (name, test_dataset) in enumerate(test_datasets.items()):
    logits_dataset = []
    for m in range(ensemble_size):
      filename = '{dataset}_{member}.npy'.format(dataset=name, member=m)
      filename = os.path.join(FLAGS.output_dir, filename)
      with tf.io.gfile.GFile(filename, 'rb') as f:
        logits_dataset.append(np.load(f))

    logits_dataset = tf.convert_to_tensor(logits_dataset)
    test_iterator = iter(test_dataset)
    for step in range(steps_per_eval):
      _, labels = next(test_iterator)  # pytype: disable=attribute-error
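      # logits_dataset has shape (ensemble_size, num_examples, num_classes);
      # slice out this step's batch for every member at once.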
      logits = logits_dataset[:, (step*batch_size):((step+1)*batch_size)]
      labels = tf.cast(tf.reshape(labels, [-1]), tf.int32)
      negative_log_likelihood = um.ensemble_cross_entropy(labels, logits)
      per_probs = tf.nn.softmax(logits)
      probs = tf.reduce_mean(per_probs, axis=0)
      if name == 'clean':
        gibbs_ce = um.gibbs_cross_entropy(labels, logits)
        metrics['test/negative_log_likelihood'].update_state(
            negative_log_likelihood)
        metrics['test/gibbs_cross_entropy'].update_state(gibbs_ce)
        metrics['test/accuracy'].update_state(labels, probs)
        metrics['test/ece'].update_state(labels, probs)
      else:
        corrupt_metrics['test/nll_{}'.format(name)].update_state(
            negative_log_likelihood)
        corrupt_metrics['test/accuracy_{}'.format(name)].update_state(
            labels, probs)
        corrupt_metrics['test/ece_{}'.format(name)].update_state(
            labels, probs)

    message = ('{:.1%} completion for evaluation: dataset {:d}/{:d}'.format(
        (n + 1) / num_datasets, n + 1, num_datasets))
    logging.info(message)

  corrupt_results = utils.aggregate_corrupt_metrics(corrupt_metrics,
                                                    corruption_types,
                                                    max_intensity,
                                                    FLAGS.alexnet_errors_path)
  total_results = {name: metric.result() for name, metric in metrics.items()}
  total_results.update(corrupt_results)
  logging.info('Metrics: %s', total_results)
Example #6
def main(argv):
    del argv  # unused arg
    tf.io.gfile.makedirs(FLAGS.output_dir)
    logging.info('Saving checkpoints at %s', FLAGS.output_dir)
    tf.random.set_seed(FLAGS.seed)

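    # Inputs get tiled ensemble_size times inside the step functions, so shrink
    # the per-core batch to keep the effective batch size unchanged.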
    per_core_batch_size = FLAGS.per_core_batch_size // FLAGS.ensemble_size
    batch_size = per_core_batch_size * FLAGS.num_cores
    steps_per_epoch = APPROX_IMAGENET_TRAIN_IMAGES // batch_size
    steps_per_eval = IMAGENET_VALIDATION_IMAGES // batch_size

    if FLAGS.use_gpu:
        logging.info('Use GPU')
        strategy = tf.distribute.MirroredStrategy()
    else:
        logging.info('Use TPU at %s',
                     FLAGS.tpu if FLAGS.tpu is not None else 'local')
        resolver = tf.distribute.cluster_resolver.TPUClusterResolver(
            tpu=FLAGS.tpu)
        tf.config.experimental_connect_to_cluster(resolver)
        tf.tpu.experimental.initialize_tpu_system(resolver)
        strategy = tf.distribute.TPUStrategy(resolver)

    mixup_params = {
        'ensemble_size': FLAGS.ensemble_size,
        'mixup_alpha': FLAGS.mixup_alpha,
        'adaptive_mixup': FLAGS.adaptive_mixup,
        'num_classes': NUM_CLASSES,
    }
    train_builder = utils.ImageNetInput(data_dir=FLAGS.data_dir,
                                        one_hot=(FLAGS.mixup_alpha > 0),
                                        use_bfloat16=FLAGS.use_bfloat16,
                                        mixup_params=mixup_params,
                                        ensemble_size=FLAGS.ensemble_size)
    test_builder = utils.ImageNetInput(data_dir=FLAGS.data_dir,
                                       use_bfloat16=FLAGS.use_bfloat16)
    train_dataset = train_builder.as_dataset(split=tfds.Split.TRAIN,
                                             batch_size=batch_size)
    clean_test_dataset = test_builder.as_dataset(split=tfds.Split.TEST,
                                                 batch_size=batch_size)
    train_dataset = strategy.experimental_distribute_dataset(train_dataset)
    test_datasets = {
        'clean': strategy.experimental_distribute_dataset(clean_test_dataset)
    }
    if FLAGS.adaptive_mixup:
        imagenet_confidence_dataset = test_builder.as_dataset(
            split=tfds.Split.VALIDATION,
            batch_size=FLAGS.per_core_batch_size * FLAGS.num_cores)
        imagenet_confidence_dataset = (
            strategy.experimental_distribute_dataset(
                imagenet_confidence_dataset))
    if FLAGS.corruptions_interval > 0:
        corruption_types, max_intensity = utils.load_corrupted_test_info()
        for name in corruption_types:
            for intensity in range(1, max_intensity + 1):
                dataset_name = '{0}_{1}'.format(name, intensity)
                dataset = utils.load_corrupted_test_dataset(
                    batch_size=batch_size,
                    corruption_name=name,
                    corruption_intensity=intensity,
                    use_bfloat16=FLAGS.use_bfloat16)
                test_datasets[dataset_name] = (
                    strategy.experimental_distribute_dataset(dataset))

    if FLAGS.use_bfloat16:
        policy = tf.keras.mixed_precision.experimental.Policy('mixed_bfloat16')
        tf.keras.mixed_precision.experimental.set_policy(policy)

    summary_writer = tf.summary.create_file_writer(
        os.path.join(FLAGS.output_dir, 'summaries'))

    with strategy.scope():
        logging.info('Building Keras ResNet-50 model')
        model = ub.models.resnet_batchensemble(
            input_shape=(224, 224, 3),
            num_classes=NUM_CLASSES,
            ensemble_size=FLAGS.ensemble_size,
            random_sign_init=FLAGS.random_sign_init,
            use_ensemble_bn=FLAGS.use_ensemble_bn,
            depth=FLAGS.depth)
        logging.info('Model input shape: %s', model.input_shape)
        logging.info('Model output shape: %s', model.output_shape)
        logging.info('Model number of weights: %s', model.count_params())
        # Scale the learning rate and decay epochs relative to the vanilla
        # (batch size 256) settings.
        base_lr = FLAGS.base_learning_rate * batch_size / 256
        learning_rate = utils.LearningRateSchedule(steps_per_epoch, base_lr,
                                                   FLAGS.train_epochs,
                                                   _LR_SCHEDULE)
        optimizer = tf.keras.optimizers.SGD(learning_rate=learning_rate,
                                            momentum=0.9,
                                            nesterov=True)
        metrics = {
            'train/negative_log_likelihood':
            tf.keras.metrics.Mean(),
            'train/accuracy':
            tf.keras.metrics.SparseCategoricalAccuracy(),
            'train/loss':
            tf.keras.metrics.Mean(),
            'train/ece':
            um.ExpectedCalibrationError(num_bins=FLAGS.num_bins),
            'test/negative_log_likelihood':
            tf.keras.metrics.Mean(),
            'test/accuracy':
            tf.keras.metrics.SparseCategoricalAccuracy(),
            'test/ece':
            um.ExpectedCalibrationError(num_bins=FLAGS.num_bins),
            'test/member_accuracy_mean':
            (tf.keras.metrics.SparseCategoricalAccuracy()),
            'test/member_ece_mean':
            um.ExpectedCalibrationError(num_bins=FLAGS.num_bins)
        }

        if FLAGS.corruptions_interval > 0:
            corrupt_metrics = {}
            for intensity in range(1, max_intensity + 1):
                for corruption in corruption_types:
                    dataset_name = '{0}_{1}'.format(corruption, intensity)
                    corrupt_metrics['test/nll_{}'.format(dataset_name)] = (
                        tf.keras.metrics.Mean())
                    corrupt_metrics['test/accuracy_{}'.format(
                        dataset_name)] = (
                            tf.keras.metrics.SparseCategoricalAccuracy())
                    corrupt_metrics['test/ece_{}'.format(dataset_name)] = (
                        um.ExpectedCalibrationError(num_bins=FLAGS.num_bins))
                    corrupt_metrics['test/member_acc_mean_{}'.format(
                        dataset_name)] = (
                            tf.keras.metrics.SparseCategoricalAccuracy())
                    corrupt_metrics['test/member_ece_mean_{}'.format(
                        dataset_name)] = (um.ExpectedCalibrationError(
                            num_bins=FLAGS.num_bins))

        for i in range(FLAGS.ensemble_size):
            metrics['test/nll_member_{}'.format(i)] = tf.keras.metrics.Mean()
            metrics['test/accuracy_member_{}'.format(i)] = (
                tf.keras.metrics.SparseCategoricalAccuracy())
        test_diversity = {
            'test/disagreement': tf.keras.metrics.Mean(),
            'test/average_kl': tf.keras.metrics.Mean(),
            'test/cosine_similarity': tf.keras.metrics.Mean(),
        }
        training_diversity = {
            'train/disagreement': tf.keras.metrics.Mean(),
            'train/average_kl': tf.keras.metrics.Mean(),
            'train/cosine_similarity': tf.keras.metrics.Mean(),
        }

        logging.info('Finished building Keras ResNet-50 model')

        checkpoint = tf.train.Checkpoint(model=model, optimizer=optimizer)
        latest_checkpoint = tf.train.latest_checkpoint(FLAGS.output_dir)
        initial_epoch = 0
        if latest_checkpoint:
            # checkpoint.restore must be within a strategy.scope() so that optimizer
            # slot variables are mirrored.
            checkpoint.restore(latest_checkpoint)
            logging.info('Loaded checkpoint %s', latest_checkpoint)
            initial_epoch = optimizer.iterations.numpy() // steps_per_epoch

    @tf.function
    def train_step(iterator):
        """Training StepFn."""
        def step_fn(inputs):
            """Per-Replica StepFn."""
            images, labels = inputs
            if FLAGS.adaptive_mixup:
                images = tf.identity(images)
            else:
                images = tf.tile(images, [FLAGS.ensemble_size, 1, 1, 1])

            if FLAGS.adaptive_mixup:
                labels = tf.identity(labels)
            elif FLAGS.mixup_alpha > 0:
                labels = tf.tile(labels, [FLAGS.ensemble_size, 1])
            else:
                labels = tf.tile(labels, [FLAGS.ensemble_size])

            with tf.GradientTape() as tape:
                logits = model(images, training=True)
                if FLAGS.use_bfloat16:
                    logits = tf.cast(logits, tf.float32)

                probs = tf.nn.softmax(logits)
                per_probs = tf.reshape(
                    probs,
                    tf.concat([[FLAGS.ensemble_size, -1], probs.shape[1:]], 0))
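                # Diversity metrics (disagreement, average KL, cosine
                # similarity) computed pairwise across ensemble members.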
                diversity_results = um.average_pairwise_diversity(
                    per_probs, FLAGS.ensemble_size)

                if FLAGS.mixup_alpha > 0:
                    negative_log_likelihood = tf.reduce_mean(
                        tf.keras.losses.categorical_crossentropy(
                            labels, logits, from_logits=True))
                else:
                    negative_log_likelihood = tf.reduce_mean(
                        tf.keras.losses.sparse_categorical_crossentropy(
                            labels, logits, from_logits=True))
                filtered_variables = []
                for var in model.trainable_variables:
                    # Apply l2 on the slow weights and bias terms. This excludes BN
                    # parameters and fast-weight approximate posterior/prior parameters;
                    # be careful with their naming scheme.
                    if 'kernel' in var.name or 'bias' in var.name:
                        filtered_variables.append(tf.reshape(var, (-1, )))

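                # tf.nn.l2_loss returns sum(w ** 2) / 2, so multiply by 2 for
                # the standard squared-norm penalty.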
                l2_loss = FLAGS.l2 * 2 * tf.nn.l2_loss(
                    tf.concat(filtered_variables, axis=0))
                loss = negative_log_likelihood + l2_loss
                # Scale the loss, since TPUStrategy reduce-sums gradients across replicas.
                scaled_loss = loss / strategy.num_replicas_in_sync

            grads = tape.gradient(scaled_loss, model.trainable_variables)

            # Optionally apply a separate learning rate to the fast weights.
            if FLAGS.fast_weight_lr_multiplier != 1.0:
                grads_and_vars = []
                for grad, var in zip(grads, model.trainable_variables):
                    # Apply a different learning rate to the fast weights. This excludes
                    # BN and slow weights; be careful with the naming scheme.
                    if ('batch_norm' not in var.name
                            and 'kernel' not in var.name):
                        grads_and_vars.append(
                            (grad * FLAGS.fast_weight_lr_multiplier, var))
                    else:
                        grads_and_vars.append((grad, var))
                optimizer.apply_gradients(grads_and_vars)
            else:
                optimizer.apply_gradients(zip(grads,
                                              model.trainable_variables))

            if FLAGS.mixup_alpha > 0:
                labels = tf.argmax(labels, axis=-1)
            metrics['train/ece'].update_state(labels, probs)
            metrics['train/loss'].update_state(loss)
            metrics['train/negative_log_likelihood'].update_state(
                negative_log_likelihood)
            metrics['train/accuracy'].update_state(labels, logits)
            for k, v in diversity_results.items():
                training_diversity['train/' + k].update_state(v)

        strategy.run(step_fn, args=(next(iterator), ))

    @tf.function
    def test_step(iterator, dataset_name):
        """Evaluation StepFn."""
        def step_fn(inputs):
            """Per-Replica StepFn."""
            images, labels = inputs
            images = tf.tile(images, [FLAGS.ensemble_size, 1, 1, 1])
            logits = model(images, training=False)
            if FLAGS.use_bfloat16:
                logits = tf.cast(logits, tf.float32)
            probs = tf.nn.softmax(logits)

            if dataset_name == 'clean':
                per_probs_tensor = tf.reshape(
                    probs,
                    tf.concat([[FLAGS.ensemble_size, -1], probs.shape[1:]], 0))
                diversity_results = um.average_pairwise_diversity(
                    per_probs_tensor, FLAGS.ensemble_size)
                for k, v in diversity_results.items():
                    test_diversity['test/' + k].update_state(v)

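            # Undo the ensemble tiling: split the batch into per-member
            # predictions and average them.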
            per_probs = tf.split(probs,
                                 num_or_size_splits=FLAGS.ensemble_size,
                                 axis=0)
            probs = tf.reduce_mean(per_probs, axis=0)

            negative_log_likelihood = tf.reduce_mean(
                tf.keras.losses.sparse_categorical_crossentropy(labels, probs))

            for i in range(FLAGS.ensemble_size):
                member_probs = per_probs[i]
                if dataset_name == 'clean':
                    member_loss = tf.keras.losses.sparse_categorical_crossentropy(
                        labels, member_probs)
                    metrics['test/nll_member_{}'.format(i)].update_state(
                        member_loss)
                    metrics['test/accuracy_member_{}'.format(i)].update_state(
                        labels, member_probs)
                    metrics['test/member_accuracy_mean'].update_state(
                        labels, member_probs)
                    metrics['test/member_ece_mean'].update_state(
                        labels, member_probs)
                elif dataset_name != 'confidence_validation':
                    corrupt_metrics['test/member_acc_mean_{}'.format(
                        dataset_name)].update_state(labels, member_probs)
                    corrupt_metrics['test/member_ece_mean_{}'.format(
                        dataset_name)].update_state(labels, member_probs)

            if dataset_name == 'clean':
                metrics['test/negative_log_likelihood'].update_state(
                    negative_log_likelihood)
                metrics['test/accuracy'].update_state(labels, probs)
                metrics['test/ece'].update_state(labels, probs)
            elif dataset_name != 'confidence_validation':
                corrupt_metrics['test/nll_{}'.format(
                    dataset_name)].update_state(negative_log_likelihood)
                corrupt_metrics['test/accuracy_{}'.format(
                    dataset_name)].update_state(labels, probs)
                corrupt_metrics['test/ece_{}'.format(
                    dataset_name)].update_state(labels, probs)

            if dataset_name == 'confidence_validation':
                return tf.stack(per_probs, 0), labels

        if dataset_name == 'confidence_validation':
            return strategy.run(step_fn, args=(next(iterator), ))
        else:
            strategy.run(step_fn, args=(next(iterator), ))

    metrics.update({'test/ms_per_example': tf.keras.metrics.Mean()})

    train_iterator = iter(train_dataset)
    start_time = time.time()
    for epoch in range(initial_epoch, FLAGS.train_epochs):
        logging.info('Starting to run epoch: %s', epoch)
        for step in range(steps_per_epoch):
            train_step(train_iterator)

            current_step = epoch * steps_per_epoch + (step + 1)
            max_steps = steps_per_epoch * FLAGS.train_epochs
            time_elapsed = time.time() - start_time
            steps_per_sec = float(current_step) / time_elapsed
            eta_seconds = (max_steps - current_step) / steps_per_sec
            message = ('{:.1%} completion: epoch {:d}/{:d}. {:.1f} steps/s. '
                       'ETA: {:.0f} min. Time elapsed: {:.0f} min'.format(
                           current_step / max_steps, epoch + 1,
                           FLAGS.train_epochs, steps_per_sec, eta_seconds / 60,
                           time_elapsed / 60))
            if step % 20 == 0:
                logging.info(message)

        if FLAGS.adaptive_mixup:
            confidence_set_iterator = iter(imagenet_confidence_dataset)
            predictions_list = []
            labels_list = []
            for step in range(FLAGS.confidence_eval_iterations):
                temp_predictions, temp_labels = test_step(
                    confidence_set_iterator, 'confidence_validation')
                predictions_list.append(temp_predictions)
                labels_list.append(temp_labels)
            predictions = [
                tf.concat(list(predictions_list[i].values), axis=1)
                for i in range(len(predictions_list))
            ]
            labels = [
                tf.concat(list(labels_list[i].values), axis=0)
                for i in range(len(labels_list))
            ]
            predictions = tf.concat(predictions, axis=1)
            labels = tf.cast(tf.concat(labels, axis=0), tf.int64)

            def compute_acc_conf(preds, label, focus_class):
                class_preds = tf.boolean_mask(preds,
                                              label == focus_class,
                                              axis=1)
                class_pred_labels = tf.argmax(class_preds, axis=-1)
                confidence = tf.reduce_mean(
                    tf.reduce_max(class_preds, axis=-1), -1)
                accuracy = tf.reduce_mean(tf.cast(
                    class_pred_labels == focus_class, tf.float32),
                                          axis=-1)
                return accuracy - confidence

            calibration_per_class = [
                compute_acc_conf(predictions, labels, i)
                for i in range(NUM_CLASSES)
            ]
            calibration_per_class = tf.stack(calibration_per_class, axis=1)
            logging.info('calibration per class')
            logging.info(calibration_per_class)
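            # Classes whose accuracy exceeds their confidence keep coefficient
            # 1.0; overconfident classes fall back to mixup_alpha (clipped to [0, 1]).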
            mixup_coeff = tf.where(calibration_per_class > 0, 1.0,
                                   FLAGS.mixup_alpha)
            mixup_coeff = tf.clip_by_value(mixup_coeff, 0, 1)
            logging.info('mixup coeff')
            logging.info(mixup_coeff)
            mixup_params['mixup_coeff'] = mixup_coeff
            builder = utils.ImageNetInput(data_dir=FLAGS.data_dir,
                                          one_hot=(FLAGS.mixup_alpha > 0),
                                          use_bfloat16=FLAGS.use_bfloat16,
                                          mixup_params=mixup_params)
            train_dataset = builder.as_dataset(split=tfds.Split.TRAIN,
                                               batch_size=batch_size)
            train_dataset = strategy.experimental_distribute_dataset(
                train_dataset)
            train_iterator = iter(train_dataset)

        datasets_to_evaluate = {'clean': test_datasets['clean']}
        if (FLAGS.corruptions_interval > 0
                and (epoch + 1) % FLAGS.corruptions_interval == 0):
            datasets_to_evaluate = test_datasets
        for dataset_name, test_dataset in datasets_to_evaluate.items():
            test_iterator = iter(test_dataset)
            logging.info('Testing on dataset %s', dataset_name)
            for step in range(steps_per_eval):
                if step % 20 == 0:
                    logging.info('Starting to run eval step %s of epoch: %s',
                                 step, epoch)
                test_start_time = time.time()
                test_step(test_iterator, dataset_name)
                ms_per_example = (time.time() -
                                  test_start_time) * 1e3 / batch_size
                metrics['test/ms_per_example'].update_state(ms_per_example)

            logging.info('Done with testing on %s', dataset_name)

        corrupt_results = {}
        if (FLAGS.corruptions_interval > 0
                and (epoch + 1) % FLAGS.corruptions_interval == 0):
            corrupt_results = utils.aggregate_corrupt_metrics(
                corrupt_metrics, corruption_types, max_intensity,
                FLAGS.alexnet_errors_path)

        logging.info('Train Loss: %.4f, Accuracy: %.2f%%',
                     metrics['train/loss'].result(),
                     metrics['train/accuracy'].result() * 100)
        logging.info('Test NLL: %.4f, Accuracy: %.2f%%',
                     metrics['test/negative_log_likelihood'].result(),
                     metrics['test/accuracy'].result() * 100)
        for i in range(FLAGS.ensemble_size):
            logging.info(
                'Member %d Test Loss: %.4f, Accuracy: %.2f%%', i,
                metrics['test/nll_member_{}'.format(i)].result(),
                metrics['test/accuracy_member_{}'.format(i)].result() * 100)

        total_metrics = metrics.copy()
        total_metrics.update(training_diversity)
        total_metrics.update(test_diversity)
        total_results = {
            name: metric.result()
            for name, metric in total_metrics.items()
        }
        total_results.update(corrupt_results)
        with summary_writer.as_default():
            for name, result in total_results.items():
                tf.summary.scalar(name, result, step=epoch + 1)

        for _, metric in total_metrics.items():
            metric.reset_states()

        if (FLAGS.checkpoint_interval > 0
                and (epoch + 1) % FLAGS.checkpoint_interval == 0):
            checkpoint_name = checkpoint.save(
                os.path.join(FLAGS.output_dir, 'checkpoint'))
            logging.info('Saved checkpoint to %s', checkpoint_name)

    final_save_name = os.path.join(FLAGS.output_dir, 'model')
    model.save(final_save_name)
    logging.info('Saved model to %s', final_save_name)
Example #7
def main(argv):
    del argv  # unused arg

    tf.io.gfile.makedirs(FLAGS.output_dir)
    logging.info('Saving checkpoints at %s', FLAGS.output_dir)
    tf.random.set_seed(FLAGS.seed)

    per_core_batch_size = FLAGS.per_core_batch_size // FLAGS.ensemble_size
    batch_size = per_core_batch_size * FLAGS.num_cores
    steps_per_epoch = APPROX_IMAGENET_TRAIN_IMAGES // batch_size
    steps_per_eval = IMAGENET_VALIDATION_IMAGES // batch_size

    data_dir = FLAGS.data_dir
    if FLAGS.use_gpu:
        logging.info('Use GPU')
        strategy = tf.distribute.MirroredStrategy()
    else:
        logging.info('Use TPU at %s',
                     FLAGS.tpu if FLAGS.tpu is not None else 'local')
        resolver = tf.distribute.cluster_resolver.TPUClusterResolver(
            tpu=FLAGS.tpu)
        tf.config.experimental_connect_to_cluster(resolver)
        tf.tpu.experimental.initialize_tpu_system(resolver)
        strategy = tf.distribute.TPUStrategy(resolver)

    train_builder = ub.datasets.ImageNetDataset(
        split=tfds.Split.TRAIN,
        use_bfloat16=FLAGS.use_bfloat16,
        data_dir=data_dir)
    train_dataset = train_builder.load(batch_size=batch_size,
                                       strategy=strategy)
    test_builder = ub.datasets.ImageNetDataset(split=tfds.Split.TEST,
                                               use_bfloat16=FLAGS.use_bfloat16,
                                               data_dir=data_dir)
    clean_test_dataset = test_builder.load(batch_size=batch_size,
                                           strategy=strategy)
    test_datasets = {
        'clean': clean_test_dataset,
    }
    if FLAGS.corruptions_interval > 0:
        corruption_types, max_intensity = utils.load_corrupted_test_info()
        for name in corruption_types:
            for intensity in range(1, max_intensity + 1):
                dataset_name = '{0}_{1}'.format(name, intensity)
                dataset = utils.load_corrupted_test_dataset(
                    batch_size=batch_size,
                    corruption_name=name,
                    corruption_intensity=intensity,
                    use_bfloat16=FLAGS.use_bfloat16)
                test_datasets[dataset_name] = (
                    strategy.experimental_distribute_dataset(dataset))

    if FLAGS.use_bfloat16:
        tf.keras.mixed_precision.set_global_policy('mixed_bfloat16')

    with strategy.scope():
        logging.info('Building Keras ResNet-50 model')
        model = ub.models.resnet50_sngp_be(
            input_shape=(224, 224, 3),
            batch_size=batch_size,
            num_classes=NUM_CLASSES,
            ensemble_size=FLAGS.ensemble_size,
            random_sign_init=FLAGS.random_sign_init,
            use_ensemble_bn=FLAGS.use_ensemble_bn,
            use_gp_layer=FLAGS.use_gp_layer,
            gp_hidden_dim=FLAGS.gp_hidden_dim,
            gp_scale=FLAGS.gp_scale,
            gp_bias=FLAGS.gp_bias,
            gp_input_normalization=FLAGS.gp_input_normalization,
            gp_cov_discount_factor=FLAGS.gp_cov_discount_factor,
            gp_cov_ridge_penalty=FLAGS.gp_cov_ridge_penalty,
            gp_output_imagenet_initializer=FLAGS.gp_output_imagenet_initializer,
            use_spec_norm=FLAGS.use_spec_norm,
            spec_norm_iteration=FLAGS.spec_norm_iteration,
            spec_norm_bound=FLAGS.spec_norm_bound,
            input_spec_norm=FLAGS.input_spec_norm)
        logging.info('Model input shape: %s', model.input_shape)
        logging.info('Model output shape: %s', model.output_shape)
        logging.info('Model number of weights: %s', model.count_params())
        # Scale the learning rate and decay epochs relative to the vanilla
        # (batch size 256) settings.
        base_lr = FLAGS.base_learning_rate * batch_size / 256
        decay_epochs = [
            (FLAGS.train_epochs * 30) // 90,
            (FLAGS.train_epochs * 60) // 90,
            (FLAGS.train_epochs * 80) // 90,
        ]
        learning_rate = ub.schedules.WarmUpPiecewiseConstantSchedule(
            steps_per_epoch=steps_per_epoch,
            base_learning_rate=base_lr,
            decay_ratio=0.1,
            decay_epochs=decay_epochs,
            warmup_epochs=5)
        optimizer = tf.keras.optimizers.SGD(learning_rate=learning_rate,
                                            momentum=1.0 -
                                            FLAGS.one_minus_momentum,
                                            nesterov=True)
        metrics = {
            'train/negative_log_likelihood':
            tf.keras.metrics.Mean(),
            'train/accuracy':
            tf.keras.metrics.SparseCategoricalAccuracy(),
            'train/loss':
            tf.keras.metrics.Mean(),
            'train/ece':
            rm.metrics.ExpectedCalibrationError(num_bins=FLAGS.num_bins),
            'test/negative_log_likelihood':
            tf.keras.metrics.Mean(),
            'test/accuracy':
            tf.keras.metrics.SparseCategoricalAccuracy(),
            'test/ece':
            rm.metrics.ExpectedCalibrationError(num_bins=FLAGS.num_bins),
            'test/stddev':
            tf.keras.metrics.Mean(),
            'test/member_accuracy_mean':
            (tf.keras.metrics.SparseCategoricalAccuracy()),
            'test/member_ece_mean':
            rm.metrics.ExpectedCalibrationError(num_bins=FLAGS.num_bins)
        }
        if FLAGS.corruptions_interval > 0:
            corrupt_metrics = {}
            for intensity in range(1, max_intensity + 1):
                for corruption in corruption_types:
                    dataset_name = '{0}_{1}'.format(corruption, intensity)
                    corrupt_metrics['test/nll_{}'.format(dataset_name)] = (
                        tf.keras.metrics.Mean())
                    corrupt_metrics['test/accuracy_{}'.format(
                        dataset_name)] = (
                            tf.keras.metrics.SparseCategoricalAccuracy())
                    corrupt_metrics['test/ece_{}'.format(dataset_name)] = (
                        rm.metrics.ExpectedCalibrationError(
                            num_bins=FLAGS.num_bins))
                    corrupt_metrics['test/stddev_{}'.format(dataset_name)] = (
                        tf.keras.metrics.Mean())
                    corrupt_metrics['test/member_acc_mean_{}'.format(
                        dataset_name)] = (
                            tf.keras.metrics.SparseCategoricalAccuracy())
                    corrupt_metrics['test/member_ece_mean_{}'.format(
                        dataset_name)] = (rm.metrics.ExpectedCalibrationError(
                            num_bins=FLAGS.num_bins))

        for i in range(FLAGS.ensemble_size):
            metrics['test/nll_member_{}'.format(i)] = tf.keras.metrics.Mean()
            metrics['test/accuracy_member_{}'.format(i)] = (
                tf.keras.metrics.SparseCategoricalAccuracy())

        logging.info('Finished building Keras ResNet-50 model')

        checkpoint = tf.train.Checkpoint(model=model, optimizer=optimizer)
        latest_checkpoint = tf.train.latest_checkpoint(FLAGS.output_dir)
        initial_epoch = 0
        if latest_checkpoint:
            # checkpoint.restore must be within a strategy.scope() so that optimizer
            # slot variables are mirrored.
            checkpoint.restore(latest_checkpoint)
            logging.info('Loaded checkpoint %s', latest_checkpoint)
            initial_epoch = optimizer.iterations.numpy() // steps_per_epoch

    summary_writer = tf.summary.create_file_writer(
        os.path.join(FLAGS.output_dir, 'summaries'))

    @tf.function
    def train_step(iterator):
        """Training StepFn."""
        def step_fn(inputs):
            """Per-Replica StepFn."""
            images = inputs['features']
            labels = inputs['labels']
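            # Tile the batch so every ensemble member sees each example in a
            # single forward pass.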
            images = tf.tile(images, [FLAGS.ensemble_size, 1, 1, 1])
            labels = tf.tile(labels, [FLAGS.ensemble_size])
            with tf.GradientTape() as tape:
                logits = model(images, training=True)

                if isinstance(logits, (list, tuple)):
                    # If model returns a tuple of (logits, covmat), extract logits
                    logits, _ = logits
                if FLAGS.use_bfloat16:
                    logits = tf.cast(logits, tf.float32)

                negative_log_likelihood = tf.reduce_mean(
                    tf.keras.losses.sparse_categorical_crossentropy(
                        labels, logits, from_logits=True))
                filtered_variables = []
                for var in model.trainable_variables:
                    # Apply l2 on the kernel weights and biases. This excludes BN
                    # parameters; be careful with their naming scheme.
                    if 'kernel' in var.name or 'bias' in var.name:
                        filtered_variables.append(tf.reshape(var, (-1, )))

                l2_loss = FLAGS.l2 * 2 * tf.nn.l2_loss(
                    tf.concat(filtered_variables, axis=0))
                # Scale the loss, since TPUStrategy reduce-sums gradients across replicas.
                loss = negative_log_likelihood + l2_loss
                scaled_loss = loss / strategy.num_replicas_in_sync

            grads = tape.gradient(scaled_loss, model.trainable_variables)
            if FLAGS.fast_weight_lr_multiplier != 1.0:
                grads_and_vars = []
                for grad, var in zip(grads, model.trainable_variables):
                    # Apply a different learning rate to the fast weights. This excludes
                    # BN and slow weights; be careful with the naming scheme.
                    if ('batch_norm' not in var.name
                            and 'kernel' not in var.name):
                        grads_and_vars.append(
                            (grad * FLAGS.fast_weight_lr_multiplier, var))
                    else:
                        grads_and_vars.append((grad, var))
                optimizer.apply_gradients(grads_and_vars)
            else:
                optimizer.apply_gradients(zip(grads,
                                              model.trainable_variables))

            probs = tf.nn.softmax(logits)
            metrics['train/ece'].add_batch(probs, label=labels)
            metrics['train/loss'].update_state(loss)
            metrics['train/negative_log_likelihood'].update_state(
                negative_log_likelihood)
            metrics['train/accuracy'].update_state(labels, logits)

        for _ in tf.range(tf.cast(steps_per_epoch, tf.int32)):
            strategy.run(step_fn, args=(next(iterator), ))

    @tf.function
    def test_step(iterator, dataset_name):
        """Evaluation StepFn."""
        def step_fn(inputs):
            """Per-Replica StepFn."""
            images = inputs['features']
            labels = inputs['labels']

            logits_list = []
            stddev_list = []
            for i in range(FLAGS.ensemble_size):
                logits = model(images, training=False)

                if isinstance(logits, (list, tuple)):
                    # If model returns a tuple of (logits, covmat), extract both
                    logits, covmat = logits
                else:
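                    # Without a GP head there is no predictive covariance;
                    # fall back to the identity matrix.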
                    covmat = tf.eye(per_core_batch_size)

                if FLAGS.use_bfloat16:
                    logits = tf.cast(logits, tf.float32)

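                # Mean-field approximation: temper the logits by the GP
                # posterior variance before taking the softmax.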
                logits = ed.layers.utils.mean_field_logits(
                    logits,
                    covmat,
                    mean_field_factor=FLAGS.gp_mean_field_factor)
                stddev = tf.sqrt(tf.linalg.diag_part(covmat))

                stddev_list.append(stddev)
                logits_list.append(logits)
                member_probs = tf.nn.softmax(logits)
                member_loss = tf.keras.losses.sparse_categorical_crossentropy(
                    labels, member_probs)
                metrics['test/nll_member_{}'.format(i)].update_state(
                    member_loss)
                metrics['test/accuracy_member_{}'.format(i)].update_state(
                    labels, member_probs)
                metrics['test/member_accuracy_mean'].update_state(
                    labels, member_probs)
                metrics['test/member_ece_mean'].add_batch(member_probs,
                                                          label=labels)

            # Logits dimension is (num_samples, batch_size, num_classes).
            logits_list = tf.stack(logits_list, axis=0)
            stddev_list = tf.stack(stddev_list, axis=0)

            stddev = tf.reduce_mean(stddev_list, axis=0)
            probs_list = tf.nn.softmax(logits_list)
            probs = tf.reduce_mean(probs_list, axis=0)

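            # Ensemble NLL: -log((1/M) * sum_m p_m(y|x)), computed via
            # logsumexp over the member log-likelihoods.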
            labels_broadcasted = tf.broadcast_to(
                labels,
                [FLAGS.ensemble_size, tf.shape(labels)[0]])
            log_likelihoods = -tf.keras.losses.sparse_categorical_crossentropy(
                labels_broadcasted, logits_list, from_logits=True)
            negative_log_likelihood = tf.reduce_mean(
                -tf.reduce_logsumexp(log_likelihoods, axis=[0]) +
                tf.math.log(float(FLAGS.ensemble_size)))

            if dataset_name == 'clean':
                metrics['test/negative_log_likelihood'].update_state(
                    negative_log_likelihood)
                metrics['test/accuracy'].update_state(labels, probs)
                metrics['test/ece'].add_batch(probs, label=labels)
                metrics['test/stddev'].update_state(stddev)
            else:
                corrupt_metrics['test/nll_{}'.format(
                    dataset_name)].update_state(negative_log_likelihood)
                corrupt_metrics['test/accuracy_{}'.format(
                    dataset_name)].update_state(labels, probs)
                corrupt_metrics['test/ece_{}'.format(dataset_name)].add_batch(
                    probs, label=labels)
                corrupt_metrics['test/stddev_{}'.format(
                    dataset_name)].update_state(stddev)

        for _ in tf.range(tf.cast(steps_per_eval, tf.int32)):
            strategy.run(step_fn, args=(next(iterator), ))

    metrics.update({'test/ms_per_example': tf.keras.metrics.Mean()})

    train_iterator = iter(train_dataset)
    start_time = time.time()
    for epoch in range(initial_epoch, FLAGS.train_epochs):
        logging.info('Starting to run epoch: %s', epoch)
        train_step(train_iterator)

        current_step = (epoch + 1) * steps_per_epoch
        max_steps = steps_per_epoch * FLAGS.train_epochs
        time_elapsed = time.time() - start_time
        steps_per_sec = float(current_step) / time_elapsed
        eta_seconds = (max_steps - current_step) / steps_per_sec
        message = ('{:.1%} completion: epoch {:d}/{:d}. {:.1f} steps/s. '
                   'ETA: {:.0f} min. Time elapsed: {:.0f} min'.format(
                       current_step / max_steps, epoch + 1, FLAGS.train_epochs,
                       steps_per_sec, eta_seconds / 60, time_elapsed / 60))
        logging.info(message)

        datasets_to_evaluate = {'clean': test_datasets['clean']}
        if (FLAGS.corruptions_interval > 0
                and (epoch + 1) % FLAGS.corruptions_interval == 0):
            datasets_to_evaluate = test_datasets
        for dataset_name, test_dataset in datasets_to_evaluate.items():
            test_iterator = iter(test_dataset)
            logging.info('Testing on dataset %s', dataset_name)
            logging.info('Starting to run eval at epoch: %s', epoch)
            test_start_time = time.time()
            test_step(test_iterator, dataset_name)
            ms_per_example = (time.time() - test_start_time) * 1e3 / (
                steps_per_eval * batch_size)
            metrics['test/ms_per_example'].update_state(ms_per_example)

            logging.info('Done with testing on %s', dataset_name)

        corrupt_results = {}
        if (FLAGS.corruptions_interval > 0
                and (epoch + 1) % FLAGS.corruptions_interval == 0):
            corrupt_results = utils.aggregate_corrupt_metrics(
                corrupt_metrics, corruption_types, max_intensity,
                FLAGS.alexnet_errors_path)

        logging.info('Train Loss: %.4f, Accuracy: %.2f%%',
                     metrics['train/loss'].result(),
                     metrics['train/accuracy'].result() * 100)
        logging.info('Test NLL: %.4f, Accuracy: %.2f%%',
                     metrics['test/negative_log_likelihood'].result(),
                     metrics['test/accuracy'].result() * 100)
        for i in range(FLAGS.ensemble_size):
            logging.info(
                'Member %d Test Loss: %.4f, Accuracy: %.2f%%', i,
                metrics['test/nll_member_{}'.format(i)].result(),
                metrics['test/accuracy_member_{}'.format(i)].result() * 100)

        total_results = {
            name: metric.result()
            for name, metric in metrics.items()
        }
        total_results.update(corrupt_results)
        # Metrics from Robustness Metrics (like ECE) will return a dict with a
        # single key/value, instead of a scalar.
        total_results = {
            k: (list(v.values())[0] if isinstance(v, dict) else v)
            for k, v in total_results.items()
        }
        with summary_writer.as_default():
            for name, result in total_results.items():
                tf.summary.scalar(name, result, step=epoch + 1)

        for metric in metrics.values():
            metric.reset_states()

        if (FLAGS.checkpoint_interval > 0
                and (epoch + 1) % FLAGS.checkpoint_interval == 0):
            checkpoint_name = checkpoint.save(
                os.path.join(FLAGS.output_dir, 'checkpoint'))
            logging.info('Saved checkpoint to %s', checkpoint_name)

    # TODO(jereliu): Convert to use SavedModel after fixing the graph-mode
    # execution bug in SpectralNormalizationConv2D which blocks the model.save()
    # functionality.
    final_checkpoint_name = checkpoint.save(
        os.path.join(FLAGS.output_dir, 'checkpoint'))
    logging.info('Saved last checkpoint to %s', final_checkpoint_name)
    with summary_writer.as_default():
        hp.hparams({
            'base_learning_rate': FLAGS.base_learning_rate,
            'one_minus_momentum': FLAGS.one_minus_momentum,
            'l2': FLAGS.l2,
            'gp_mean_field_factor': FLAGS.gp_mean_field_factor,
            'gp_scale': FLAGS.gp_scale,
            'gp_hidden_dim': FLAGS.gp_hidden_dim,
            'fast_weight_lr_multiplier': FLAGS.fast_weight_lr_multiplier,
            'random_sign_init': FLAGS.random_sign_init,
        })
Example #8
def main(argv):
    fmt = '[%(filename)s:%(lineno)s] %(message)s'
    formatter = logging.PythonFormatter(fmt)
    logging.get_absl_handler().setFormatter(formatter)
    del argv  # unused arg

    tf.io.gfile.makedirs(FLAGS.output_dir)
    logging.info('Saving checkpoints at %s', FLAGS.output_dir)
    tf.random.set_seed(FLAGS.seed)

    data_dir = None
    if FLAGS.use_gpu:
        logging.info('Use GPU')
        strategy = tf.distribute.MirroredStrategy()
    else:
        logging.info('Use TPU at %s',
                     FLAGS.tpu if FLAGS.tpu is not None else 'local')
        data_dir = FLAGS.data_dir
        resolver = tf.distribute.cluster_resolver.TPUClusterResolver(
            tpu=FLAGS.tpu)
        tf.config.experimental_connect_to_cluster(resolver)
        tf.tpu.experimental.initialize_tpu_system(resolver)
        strategy = tf.distribute.TPUStrategy(resolver)

    ds_info = tfds.builder(FLAGS.dataset).info
    batch_size = FLAGS.per_core_batch_size * FLAGS.num_cores
    train_dataset_size = (ds_info.splits['train'].num_examples *
                          FLAGS.train_proportion)
    steps_per_epoch = int(train_dataset_size / batch_size)
    logging.info('Steps per epoch %s', steps_per_epoch)
    logging.info('Size of the dataset %s',
                 ds_info.splits['train'].num_examples)
    logging.info('Train proportion %s', FLAGS.train_proportion)
    steps_per_eval = ds_info.splits['test'].num_examples // batch_size
    num_classes = ds_info.features['label'].num_classes

    aug_params = {
        'augmix': FLAGS.augmix,
        'aug_count': FLAGS.aug_count,
        'augmix_depth': FLAGS.augmix_depth,
        'augmix_prob_coeff': FLAGS.augmix_prob_coeff,
        'augmix_width': FLAGS.augmix_width,
    }

    # Note that stateless_{fold_in,split} may incur a performance cost, but a
    # quick side-by-side test seemed to imply this was minimal.
    seeds = tf.random.experimental.stateless_split(
        [FLAGS.seed, FLAGS.seed + 1], 2)[:, 0]
    train_builder = ub.datasets.get(FLAGS.dataset,
                                    download_data=FLAGS.download_data,
                                    split=tfds.Split.TRAIN,
                                    seed=seeds[0],
                                    aug_params=aug_params,
                                    validation_percent=1. -
                                    FLAGS.train_proportion,
                                    data_dir=data_dir)
    train_dataset = train_builder.load(batch_size=batch_size)
    validation_dataset = None
    steps_per_validation = 0
    if FLAGS.train_proportion < 1.0:
        validation_builder = ub.datasets.get(FLAGS.dataset,
                                             split=tfds.Split.VALIDATION,
                                             validation_percent=1. -
                                             FLAGS.train_proportion,
                                             data_dir=data_dir)
        validation_dataset = validation_builder.load(batch_size=batch_size)
        validation_dataset = strategy.experimental_distribute_dataset(
            validation_dataset)
        steps_per_validation = validation_builder.num_examples // batch_size
    clean_test_builder = ub.datasets.get(FLAGS.dataset,
                                         split=tfds.Split.TEST,
                                         data_dir=data_dir)
    clean_test_dataset = clean_test_builder.load(batch_size=batch_size)
    train_dataset = strategy.experimental_distribute_dataset(train_dataset)
    test_datasets = {
        'clean': strategy.experimental_distribute_dataset(clean_test_dataset),
    }
    steps_per_epoch = train_builder.num_examples // batch_size
    steps_per_eval = clean_test_builder.num_examples // batch_size
    num_classes = 100 if FLAGS.dataset == 'cifar100' else 10
    if FLAGS.corruptions_interval > 0:
        if FLAGS.dataset == 'cifar100':
            data_dir = FLAGS.cifar100_c_path
        corruption_types, _ = utils.load_corrupted_test_info(FLAGS.dataset)
        for corruption_type in corruption_types:
            for severity in range(1, 6):
                dataset = ub.datasets.get(
                    f'{FLAGS.dataset}_corrupted',
                    corruption_type=corruption_type,
                    severity=severity,
                    split=tfds.Split.TEST,
                    data_dir=data_dir).load(batch_size=batch_size)
                test_datasets[f'{corruption_type}_{severity}'] = (
                    strategy.experimental_distribute_dataset(dataset))

    summary_writer = tf.summary.create_file_writer(
        os.path.join(FLAGS.output_dir, 'summaries'))

    with strategy.scope():
        logging.info('Building ResNet model')
        model = ub.models.wide_resnet(input_shape=(32, 32, 3),
                                      depth=28,
                                      width_multiplier=10,
                                      num_classes=num_classes,
                                      l2=FLAGS.l2,
                                      hps=_extract_hyperparameter_dictionary(),
                                      seed=seeds[1],
                                      version=2)
        logging.info('Model input shape: %s', model.input_shape)
        logging.info('Model output shape: %s', model.output_shape)
        logging.info('Model number of weights: %s', model.count_params())
        # Linearly scale the learning rate and decay epochs relative to the
        # vanilla (batch size 128) settings.
        base_lr = FLAGS.base_learning_rate * batch_size / 128
        lr_decay_epochs = [(int(start_epoch_str) * FLAGS.train_epochs) // 200
                           for start_epoch_str in FLAGS.lr_decay_epochs]
        lr_schedule = ub.schedules.WarmUpPiecewiseConstantSchedule(
            steps_per_epoch,
            base_lr,
            decay_ratio=FLAGS.lr_decay_ratio,
            decay_epochs=lr_decay_epochs,
            warmup_epochs=FLAGS.lr_warmup_epochs)
        optimizer = tf.keras.optimizers.SGD(lr_schedule,
                                            momentum=1.0 -
                                            FLAGS.one_minus_momentum,
                                            nesterov=True)
        metrics = {
            'train/negative_log_likelihood':
            tf.keras.metrics.Mean(),
            'train/accuracy':
            tf.keras.metrics.SparseCategoricalAccuracy(),
            'train/loss':
            tf.keras.metrics.Mean(),
            'train/ece':
            rm.metrics.ExpectedCalibrationError(num_bins=FLAGS.num_bins),
            'test/negative_log_likelihood':
            tf.keras.metrics.Mean(),
            'test/accuracy':
            tf.keras.metrics.SparseCategoricalAccuracy(),
            'test/ece':
            rm.metrics.ExpectedCalibrationError(num_bins=FLAGS.num_bins),
        }
        if validation_dataset:
            metrics.update({
                'validation/negative_log_likelihood':
                tf.keras.metrics.Mean(),
                'validation/accuracy':
                tf.keras.metrics.SparseCategoricalAccuracy(),
                'validation/ece':
                rm.metrics.ExpectedCalibrationError(num_bins=FLAGS.num_bins),
            })
        if FLAGS.corruptions_interval > 0:
            corrupt_metrics = {}
            for intensity in range(1, 6):
                for corruption in corruption_types:
                    dataset_name = '{0}_{1}'.format(corruption, intensity)
                    corrupt_metrics['test/nll_{}'.format(dataset_name)] = (
                        tf.keras.metrics.Mean())
                    corrupt_metrics['test/accuracy_{}'.format(
                        dataset_name)] = (
                            tf.keras.metrics.SparseCategoricalAccuracy())
                    corrupt_metrics['test/ece_{}'.format(dataset_name)] = (
                        rm.metrics.ExpectedCalibrationError(
                            num_bins=FLAGS.num_bins))

        checkpoint = tf.train.Checkpoint(model=model, optimizer=optimizer)
        latest_checkpoint = tf.train.latest_checkpoint(FLAGS.output_dir)
        initial_epoch = 0
        if latest_checkpoint:
            # checkpoint.restore must be within a strategy.scope() so that optimizer
            # slot variables are mirrored.
            checkpoint.restore(latest_checkpoint)
            logging.info('Loaded checkpoint %s', latest_checkpoint)
            initial_epoch = optimizer.iterations.numpy() // steps_per_epoch

    @tf.function
    def train_step(iterator):
        """Training StepFn."""
        def step_fn(inputs):
            """Per-Replica StepFn."""
            images = inputs['features']
            labels = inputs['labels']

            if FLAGS.augmix and FLAGS.aug_count >= 1:
                # Index 0 of the augmix output is the unperturbed image;
                # take a single augmented copy instead.
                images = images[:, 1, ...]
            with tf.GradientTape() as tape:
                logits = model(images, training=True)
                if FLAGS.label_smoothing == 0.:
                    negative_log_likelihood = tf.reduce_mean(
                        tf.keras.losses.sparse_categorical_crossentropy(
                            labels, logits, from_logits=True))
                else:
                    one_hot_labels = tf.one_hot(tf.cast(labels, tf.int32),
                                                num_classes)
                    negative_log_likelihood = tf.reduce_mean(
                        tf.keras.losses.categorical_crossentropy(
                            one_hot_labels,
                            logits,
                            from_logits=True,
                            label_smoothing=FLAGS.label_smoothing))
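                # The model registers per-layer l2 regularizers; Keras
                # collects them in model.losses.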
                l2_loss = sum(model.losses)
                loss = negative_log_likelihood + l2_loss
                # Scale the loss, since TPUStrategy reduce-sums gradients across replicas.
                scaled_loss = loss / strategy.num_replicas_in_sync

            grads = tape.gradient(scaled_loss, model.trainable_variables)
            optimizer.apply_gradients(zip(grads, model.trainable_variables))

            probs = tf.nn.softmax(logits)
            metrics['train/ece'].add_batch(probs, label=labels)
            metrics['train/loss'].update_state(loss)
            metrics['train/negative_log_likelihood'].update_state(
                negative_log_likelihood)
            metrics['train/accuracy'].update_state(labels, logits)

        for _ in tf.range(tf.cast(steps_per_epoch, tf.int32)):
            strategy.run(step_fn, args=(next(iterator), ))

    @tf.function
    def test_step(iterator, dataset_split, dataset_name, num_steps):
        """Evaluation StepFn."""
        def step_fn(inputs):
            """Per-Replica StepFn."""
            images = inputs['features']
            labels = inputs['labels']
            logits = model(images, training=False)
            probs = tf.nn.softmax(logits)
            negative_log_likelihood = tf.reduce_mean(
                tf.keras.losses.sparse_categorical_crossentropy(labels, probs))

            if dataset_name == 'clean':
                metrics[
                    f'{dataset_split}/negative_log_likelihood'].update_state(
                        negative_log_likelihood)
                metrics[f'{dataset_split}/accuracy'].update_state(
                    labels, probs)
                metrics[f'{dataset_split}/ece'].add_batch(probs, label=labels)
            else:
                corrupt_metrics['test/nll_{}'.format(
                    dataset_name)].update_state(negative_log_likelihood)
                corrupt_metrics['test/accuracy_{}'.format(
                    dataset_name)].update_state(labels, probs)
                corrupt_metrics['test/ece_{}'.format(dataset_name)].add_batch(
                    probs, label=labels)

        for _ in tf.range(tf.cast(num_steps, tf.int32)):
            strategy.run(step_fn, args=(next(iterator), ))

    metrics.update({'test/ms_per_example': tf.keras.metrics.Mean()})
    metrics.update({'train/ms_per_example': tf.keras.metrics.Mean()})

    train_iterator = iter(train_dataset)
    start_time = time.time()
    tb_callback = None
    if FLAGS.collect_profile:
        tb_callback = tf.keras.callbacks.TensorBoard(profile_batch=(100, 102),
                                                     log_dir=os.path.join(
                                                         FLAGS.output_dir,
                                                         'logs'))
        tb_callback.set_model(model)
    for epoch in range(initial_epoch, FLAGS.train_epochs):
        logging.info('Starting to run epoch: %s', epoch)
        if tb_callback:
            tb_callback.on_epoch_begin(epoch)
        train_start_time = time.time()
        train_step(train_iterator)
        ms_per_example = (time.time() - train_start_time) * 1e6 / batch_size
        metrics['train/ms_per_example'].update_state(ms_per_example)

        current_step = (epoch + 1) * steps_per_epoch
        max_steps = steps_per_epoch * FLAGS.train_epochs
        time_elapsed = time.time() - start_time
        steps_per_sec = float(current_step) / time_elapsed
        eta_seconds = (max_steps - current_step) / steps_per_sec
        message = ('{:.1%} completion: epoch {:d}/{:d}. {:.1f} steps/s. '
                   'ETA: {:.0f} min. Time elapsed: {:.0f} min'.format(
                       current_step / max_steps, epoch + 1, FLAGS.train_epochs,
                       steps_per_sec, eta_seconds / 60, time_elapsed / 60))
        logging.info(message)
        if tb_callback:
            tb_callback.on_epoch_end(epoch)

        if validation_dataset:
            validation_iterator = iter(validation_dataset)
            test_step(validation_iterator, 'validation', 'clean',
                      steps_per_validation)
        datasets_to_evaluate = {'clean': test_datasets['clean']}
        if (FLAGS.corruptions_interval > 0
                and (epoch + 1) % FLAGS.corruptions_interval == 0):
            datasets_to_evaluate = test_datasets
        for dataset_name, test_dataset in datasets_to_evaluate.items():
            test_iterator = iter(test_dataset)
            logging.info('Testing on dataset %s', dataset_name)
            logging.info('Starting to run eval at epoch: %s', epoch)
            test_start_time = time.time()
            test_step(test_iterator, 'test', dataset_name, steps_per_eval)
            ms_per_example = (time.time() - test_start_time) * 1e6 / batch_size
            metrics['test/ms_per_example'].update_state(ms_per_example)

            logging.info('Done with testing on %s', dataset_name)

        corrupt_results = {}
        if (FLAGS.corruptions_interval > 0
                and (epoch + 1) % FLAGS.corruptions_interval == 0):
            corrupt_results = utils.aggregate_corrupt_metrics(
                corrupt_metrics, corruption_types)

        logging.info('Train Loss: %.4f, Accuracy: %.2f%%',
                     metrics['train/loss'].result(),
                     metrics['train/accuracy'].result() * 100)
        logging.info('Test NLL: %.4f, Accuracy: %.2f%%',
                     metrics['test/negative_log_likelihood'].result(),
                     metrics['test/accuracy'].result() * 100)
        total_results = {
            name: metric.result()
            for name, metric in metrics.items()
        }
        total_results.update(corrupt_results)
        # Metrics from Robustness Metrics (like ECE) will return a dict with a
        # single key/value, instead of a scalar.
        total_results = {
            k: (list(v.values())[0] if isinstance(v, dict) else v)
            for k, v in total_results.items()
        }
        with summary_writer.as_default():
            for name, result in total_results.items():
                tf.summary.scalar(name, result, step=epoch + 1)

        for metric in metrics.values():
            metric.reset_states()

        if (FLAGS.checkpoint_interval > 0
                and (epoch + 1) % FLAGS.checkpoint_interval == 0):
            checkpoint_name = checkpoint.save(
                os.path.join(FLAGS.output_dir, 'checkpoint'))
            logging.info('Saved checkpoint to %s', checkpoint_name)

    final_checkpoint_name = checkpoint.save(
        os.path.join(FLAGS.output_dir, 'checkpoint'))
    logging.info('Saved last checkpoint to %s', final_checkpoint_name)
    with summary_writer.as_default():
        hp.hparams({
            'base_learning_rate': FLAGS.base_learning_rate,
            'one_minus_momentum': FLAGS.one_minus_momentum,
            'l2': FLAGS.l2,
        })
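
When label smoothing is enabled, the training step above switches from sparse to dense targets. As a minimal self-contained sketch (toy values, independent of the script), the transformation that label_smoothing applies to the targets can be reproduced by hand:

import tensorflow as tf

# With label_smoothing=s over K classes, the true-class target becomes
# (1 - s) + s/K and every other class receives s/K.
num_classes = 4
one_hot = tf.one_hot([2], num_classes)
logits = tf.math.log([[0.1, 0.1, 0.7, 0.1]])  # softmax(logits) recovers probs

s = 0.1
loss_keras = tf.keras.losses.categorical_crossentropy(
    one_hot, logits, from_logits=True, label_smoothing=s)

smoothed_targets = one_hot * (1.0 - s) + s / num_classes
loss_manual = tf.keras.losses.categorical_crossentropy(
    smoothed_targets, logits, from_logits=True)

tf.debugging.assert_near(loss_keras, loss_manual)
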
def main(argv):
    del argv  # unused arg
    tf.io.gfile.makedirs(FLAGS.output_dir)
    logging.info('Saving checkpoints at %s', FLAGS.output_dir)
    tf.random.set_seed(FLAGS.seed)

    batch_size = FLAGS.per_core_batch_size * FLAGS.num_cores
    steps_per_epoch = APPROX_IMAGENET_TRAIN_IMAGES // batch_size
    steps_per_eval = IMAGENET_VALIDATION_IMAGES // batch_size

    if FLAGS.use_gpu:
        logging.info('Use GPU')
        strategy = tf.distribute.MirroredStrategy()
    else:
        logging.info('Use TPU at %s',
                     FLAGS.tpu if FLAGS.tpu is not None else 'local')
        resolver = tf.distribute.cluster_resolver.TPUClusterResolver(
            tpu=FLAGS.tpu)
        tf.config.experimental_connect_to_cluster(resolver)
        tf.tpu.experimental.initialize_tpu_system(resolver)
        strategy = tf.distribute.TPUStrategy(resolver)

    mixup_params = {
        'ensemble_size': 1,
        'mixup_alpha': FLAGS.mixup_alpha,
        'adaptive_mixup': FLAGS.adaptive_mixup,
        'num_classes': NUM_CLASSES,
    }
    train_builder = ub.datasets.ImageNetDataset(
        split=tfds.Split.TRAIN,
        one_hot=(FLAGS.mixup_alpha > 0),
        use_bfloat16=FLAGS.use_bfloat16,
        mixup_params=mixup_params)
    test_builder = ub.datasets.ImageNetDataset(split=tfds.Split.TEST,
                                               use_bfloat16=FLAGS.use_bfloat16)
    train_dataset = train_builder.load(batch_size=batch_size,
                                       strategy=strategy)
    clean_test_dataset = test_builder.load(batch_size=batch_size,
                                           strategy=strategy)
    test_datasets = {
        'clean': clean_test_dataset,
    }
    if FLAGS.adaptive_mixup:
        imagenet_confidence_dataset = ub.datasets.ImageNetDataset(
            split=tfds.Split.VALIDATION,
            run_mixup=True,
            use_bfloat16=FLAGS.use_bfloat16).load(batch_size=batch_size,
                                                  strategy=strategy)
    if FLAGS.corruptions_interval > 0:
        corruption_types, max_intensity = utils.load_corrupted_test_info()
        for name in corruption_types:
            for intensity in range(1, max_intensity + 1):
                dataset_name = '{0}_{1}'.format(name, intensity)
                dataset = utils.load_corrupted_test_dataset(
                    batch_size=batch_size,
                    corruption_name=name,
                    corruption_intensity=intensity,
                    use_bfloat16=FLAGS.use_bfloat16)
                test_datasets[dataset_name] = (
                    strategy.experimental_distribute_dataset(dataset))

    if FLAGS.use_bfloat16:
        policy = tf.keras.mixed_precision.experimental.Policy('mixed_bfloat16')
        tf.keras.mixed_precision.experimental.set_policy(policy)

    with strategy.scope():
        logging.info('Building Keras ResNet-50 model')
        model = ub.models.resnet50_dropout(
            input_shape=(224, 224, 3),
            num_classes=NUM_CLASSES,
            dropout_rate=FLAGS.dropout_rate,
            filterwise_dropout=FLAGS.filterwise_dropout)
        logging.info('Model input shape: %s', model.input_shape)
        logging.info('Model output shape: %s', model.output_shape)
        logging.info('Model number of weights: %s', model.count_params())
        # Scale learning rate and decay epochs by vanilla settings.
        base_lr = FLAGS.base_learning_rate * batch_size / 256
        decay_epochs = [
            (FLAGS.train_epochs * 30) // 90,
            (FLAGS.train_epochs * 60) // 90,
            (FLAGS.train_epochs * 80) // 90,
        ]
        learning_rate = ub.schedules.WarmUpPiecewiseConstantSchedule(
            steps_per_epoch=steps_per_epoch,
            base_learning_rate=base_lr,
            decay_ratio=0.1,
            decay_epochs=decay_epochs,
            warmup_epochs=5)
        optimizer = tf.keras.optimizers.SGD(learning_rate=learning_rate,
                                            momentum=1.0 -
                                            FLAGS.one_minus_momentum,
                                            nesterov=True)
        metrics = {
            'train/negative_log_likelihood':
            tf.keras.metrics.Mean(),
            'train/accuracy':
            tf.keras.metrics.SparseCategoricalAccuracy(),
            'train/loss':
            tf.keras.metrics.Mean(),
            'train/ece':
            rm.metrics.ExpectedCalibrationError(num_bins=FLAGS.num_bins),
            'test/negative_log_likelihood':
            tf.keras.metrics.Mean(),
            'test/accuracy':
            tf.keras.metrics.SparseCategoricalAccuracy(),
            'test/ece':
            rm.metrics.ExpectedCalibrationError(num_bins=FLAGS.num_bins),
        }
        if FLAGS.corruptions_interval > 0:
            corrupt_metrics = {}
            for intensity in range(1, max_intensity + 1):
                for corruption in corruption_types:
                    dataset_name = '{0}_{1}'.format(corruption, intensity)
                    corrupt_metrics['test/nll_{}'.format(dataset_name)] = (
                        tf.keras.metrics.Mean())
                    corrupt_metrics['test/accuracy_{}'.format(
                        dataset_name)] = (
                            tf.keras.metrics.SparseCategoricalAccuracy())
                    corrupt_metrics['test/ece_{}'.format(dataset_name)] = (
                        rm.metrics.ExpectedCalibrationError(
                            num_bins=FLAGS.num_bins))
        logging.info('Finished building Keras ResNet-50 model')

        checkpoint = tf.train.Checkpoint(model=model, optimizer=optimizer)
        latest_checkpoint = tf.train.latest_checkpoint(FLAGS.output_dir)
        initial_epoch = 0
        if latest_checkpoint:
            # checkpoint.restore must be within a strategy.scope() so that optimizer
            # slot variables are mirrored.
            checkpoint.restore(latest_checkpoint)
            logging.info('Loaded checkpoint %s', latest_checkpoint)
            initial_epoch = optimizer.iterations.numpy() // steps_per_epoch

    summary_writer = tf.summary.create_file_writer(
        os.path.join(FLAGS.output_dir, 'summaries'))

    @tf.function
    def train_step(iterator):
        """Training StepFn."""
        def step_fn(inputs):
            """Per-Replica StepFn."""
            images = inputs['features']
            labels = inputs['labels']
            with tf.GradientTape() as tape:
                logits = model(images, training=True)
                if FLAGS.use_bfloat16:
                    logits = tf.cast(logits, tf.float32)

                if FLAGS.mixup_alpha > 0:
                    negative_log_likelihood = tf.reduce_mean(
                        tf.keras.losses.categorical_crossentropy(
                            labels, logits, from_logits=True))
                else:
                    negative_log_likelihood = tf.reduce_mean(
                        tf.keras.losses.sparse_categorical_crossentropy(
                            labels, logits, from_logits=True))
                filtered_variables = []
                for var in model.trainable_variables:
                    # Apply l2 to kernel weights and biases ('kernel'/'bias'
                    # in the name); this excludes BN parameters (beta/gamma).
                    # Pay attention to the variable naming scheme.
                    if 'kernel' in var.name or 'bias' in var.name:
                        filtered_variables.append(tf.reshape(var, (-1, )))

                l2_loss = FLAGS.l2 * 2 * tf.nn.l2_loss(
                    tf.concat(filtered_variables, axis=0))
                loss = negative_log_likelihood + l2_loss
                # Scale the loss, given that TPUStrategy will reduce-sum the
                # gradients across replicas.
                scaled_loss = loss / strategy.num_replicas_in_sync

            grads = tape.gradient(scaled_loss, model.trainable_variables)
            optimizer.apply_gradients(zip(grads, model.trainable_variables))

            probs = tf.nn.softmax(logits)
            if FLAGS.mixup_alpha > 0:
                labels = tf.argmax(labels, axis=-1)
            metrics['train/ece'].add_batch(probs, label=labels)
            metrics['train/loss'].update_state(loss)
            metrics['train/negative_log_likelihood'].update_state(
                negative_log_likelihood)
            metrics['train/accuracy'].update_state(labels, logits)

        for _ in tf.range(tf.cast(steps_per_epoch, tf.int32)):
            strategy.run(step_fn, args=(next(iterator), ))

    @tf.function
    def test_step(iterator, dataset_name):
        """Evaluation StepFn."""
        def step_fn(inputs):
            """Per-Replica StepFn."""
            images = inputs['features']
            labels = inputs['labels']

            logits_list = []
            if dataset_name == 'confidence_validation':
                num_dropout_samples = 1
            else:
                num_dropout_samples = FLAGS.num_dropout_samples
            for _ in range(num_dropout_samples):
                logits = model(images, training=False)
                if FLAGS.use_bfloat16:
                    logits = tf.cast(logits, tf.float32)
                logits_list.append(logits)

            # Logits dimension is (num_samples, batch_size, num_classes).
            logits_list = tf.stack(logits_list, axis=0)
            probs_list = tf.nn.softmax(logits_list)
            probs = tf.reduce_mean(probs_list, axis=0)
            labels_broadcasted = tf.broadcast_to(
                labels, [num_dropout_samples, labels.shape[0]])

            log_likelihoods = -tf.keras.losses.sparse_categorical_crossentropy(
                labels_broadcasted, logits_list, from_logits=True)
            negative_log_likelihood = tf.reduce_mean(
                -tf.reduce_logsumexp(log_likelihoods, axis=[0]) +
                tf.math.log(float(num_dropout_samples)))

            if dataset_name == 'clean':
                metrics['test/negative_log_likelihood'].update_state(
                    negative_log_likelihood)
                metrics['test/accuracy'].update_state(labels, probs)
                metrics['test/ece'].add_batch(probs, label=labels)
            elif dataset_name != 'confidence_validation':
                corrupt_metrics['test/nll_{}'.format(
                    dataset_name)].update_state(negative_log_likelihood)
                corrupt_metrics['test/accuracy_{}'.format(
                    dataset_name)].update_state(labels, probs)
                corrupt_metrics['test/ece_{}'.format(dataset_name)].add_batch(
                    probs, label=labels)

            if dataset_name == 'confidence_validation':
                return tf.reshape(probs, [1, -1, NUM_CLASSES]), labels

        if dataset_name == 'confidence_validation':
            return strategy.run(step_fn, args=(next(iterator), ))
        else:
            strategy.run(step_fn, args=(next(iterator), ))

    metrics.update({'test/ms_per_example': tf.keras.metrics.Mean()})

    train_iterator = iter(train_dataset)
    start_time = time.time()
    for epoch in range(initial_epoch, FLAGS.train_epochs):
        logging.info('Starting to run epoch: %s', epoch)
        train_step(train_iterator)

        current_step = (epoch + 1) * steps_per_epoch
        max_steps = steps_per_epoch * FLAGS.train_epochs
        time_elapsed = time.time() - start_time
        steps_per_sec = float(current_step) / time_elapsed
        eta_seconds = (max_steps - current_step) / steps_per_sec
        message = ('{:.1%} completion: epoch {:d}/{:d}. {:.1f} steps/s. '
                   'ETA: {:.0f} min. Time elapsed: {:.0f} min'.format(
                       current_step / max_steps, epoch + 1, FLAGS.train_epochs,
                       steps_per_sec, eta_seconds / 60, time_elapsed / 60))
        logging.info(message)

        if FLAGS.adaptive_mixup:
            confidence_set_iterator = iter(imagenet_confidence_dataset)
            predictions_list = []
            labels_list = []
            for step in range(FLAGS.confidence_eval_iterations):
                temp_predictions, temp_labels = test_step(
                    confidence_set_iterator, 'confidence_validation')
                predictions_list.append(temp_predictions)
                labels_list.append(temp_labels)
            predictions = [
                tf.concat(list(predictions_list[i].values), axis=1)
                for i in range(len(predictions_list))
            ]
            labels = [
                tf.concat(list(labels_list[i].values), axis=0)
                for i in range(len(labels_list))
            ]
            predictions = tf.concat(predictions, axis=1)
            labels = tf.cast(tf.concat(labels, axis=0), tf.int64)

            def compute_acc_conf(preds, label, focus_class):
                class_preds = tf.boolean_mask(preds,
                                              label == focus_class,
                                              axis=1)
                class_pred_labels = tf.argmax(class_preds, axis=-1)
                confidence = tf.reduce_mean(
                    tf.reduce_max(class_preds, axis=-1), -1)
                accuracy = tf.reduce_mean(tf.cast(
                    class_pred_labels == focus_class, tf.float32),
                                          axis=-1)
                return accuracy - confidence

            calibration_per_class = [
                compute_acc_conf(predictions, labels, i)
                for i in range(NUM_CLASSES)
            ]
            calibration_per_class = tf.stack(calibration_per_class, axis=1)
            mixup_coeff = tf.where(calibration_per_class > 0, 1.0,
                                   FLAGS.mixup_alpha)
            mixup_coeff = tf.clip_by_value(mixup_coeff, 0, 1)
            mixup_params['mixup_coeff'] = mixup_coeff
            builder = ub.datasets.ImageNetDataset(
                split=tfds.Split.TRAIN,
                one_hot=(FLAGS.mixup_alpha > 0),
                use_bfloat16=FLAGS.use_bfloat16,
                mixup_params=mixup_params)
            train_dataset = builder.load(batch_size=batch_size,
                                         strategy=strategy)
            train_iterator = iter(train_dataset)

        if (epoch + 1) % FLAGS.eval_interval == 0:
            datasets_to_evaluate = {'clean': test_datasets['clean']}
            if (FLAGS.corruptions_interval > 0
                    and (epoch + 1) % FLAGS.corruptions_interval == 0):
                datasets_to_evaluate = test_datasets
            for dataset_name, test_dataset in datasets_to_evaluate.items():
                test_iterator = iter(test_dataset)
                logging.info('Testing on dataset %s', dataset_name)
                for step in range(steps_per_eval):
                    if step % 20 == 0:
                        logging.info(
                            'Starting to run eval step %s of epoch: %s', step,
                            epoch)
                    test_start_time = time.time()
                    test_step(test_iterator, dataset_name)
                    ms_per_example = (time.time() -
                                      test_start_time) * 1e6 / batch_size
                    metrics['test/ms_per_example'].update_state(ms_per_example)

                logging.info('Done with testing on %s', dataset_name)

            corrupt_results = {}
            if (FLAGS.corruptions_interval > 0
                    and (epoch + 1) % FLAGS.corruptions_interval == 0):
                corrupt_results = utils.aggregate_corrupt_metrics(
                    corrupt_metrics, corruption_types, max_intensity,
                    FLAGS.alexnet_errors_path)

        logging.info('Train Loss: %.4f, Accuracy: %.2f%%',
                     metrics['train/loss'].result(),
                     metrics['train/accuracy'].result() * 100)
        logging.info('Test NLL: %.4f, Accuracy: %.2f%%',
                     metrics['test/negative_log_likelihood'].result(),
                     metrics['test/accuracy'].result() * 100)
        total_results = {
            name: metric.result()
            for name, metric in metrics.items()
        }
        total_results.update(corrupt_results)
        # Metrics from Robustness Metrics (like ECE) will return a dict with a
        # single key/value, instead of a scalar.
        total_results = {
            k: (list(v.values())[0] if isinstance(v, dict) else v)
            for k, v in total_results.items()
        }
        with summary_writer.as_default():
            for name, result in total_results.items():
                tf.summary.scalar(name, result, step=epoch + 1)

        for metric in metrics.values():
            metric.reset_states()

        if (FLAGS.checkpoint_interval > 0
                and (epoch + 1) % FLAGS.checkpoint_interval == 0):
            checkpoint_name = checkpoint.save(
                os.path.join(FLAGS.output_dir, 'checkpoint'))
            logging.info('Saved checkpoint to %s', checkpoint_name)

    final_save_name = os.path.join(FLAGS.output_dir, 'model')
    model.save(final_save_name)
    logging.info('Saved model to %s', final_save_name)
    with summary_writer.as_default():
        hp.hparams({
            'base_learning_rate': FLAGS.base_learning_rate,
            'one_minus_momentum': FLAGS.one_minus_momentum,
            'l2': FLAGS.l2,
            'dropout_rate': FLAGS.dropout_rate,
            'num_dropout_samples': FLAGS.num_dropout_samples,
        })
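
The Monte Carlo dropout evaluation above averages softmax probabilities over num_dropout_samples forward passes and computes the predictive negative log-likelihood with a logsumexp. A small self-contained sketch (hypothetical toy tensors) confirming that -logsumexp(log_likelihoods) + log(S) is exactly -log of the mean predictive probability:

import tensorflow as tf

# Toy log-likelihoods of the true labels under S=3 stochastic forward
# passes, for a batch of 2 examples (hypothetical values).
log_likelihoods = tf.math.log(tf.constant([[0.7, 0.2],
                                           [0.6, 0.3],
                                           [0.8, 0.1]]))
num_samples = 3

# The estimator used in the script: -logsumexp over samples, plus log(S).
nll_logsumexp = tf.reduce_mean(
    -tf.reduce_logsumexp(log_likelihoods, axis=0) +
    tf.math.log(float(num_samples)))

# The same quantity computed directly from the averaged probabilities.
nll_direct = tf.reduce_mean(
    -tf.math.log(tf.reduce_mean(tf.exp(log_likelihoods), axis=0)))

tf.debugging.assert_near(nll_logsumexp, nll_direct)
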
def main(argv):
    del argv  # unused arg
    if not FLAGS.use_gpu:
        raise ValueError('Only GPU is currently supported.')
    if FLAGS.num_cores > 1:
        raise ValueError('Only a single accelerator is currently supported.')
    tf.random.set_seed(FLAGS.seed)
    tf.io.gfile.makedirs(FLAGS.output_dir)

    ds_info = tfds.builder(FLAGS.dataset).info
    batch_size = FLAGS.per_core_batch_size * FLAGS.num_cores
    steps_per_eval = ds_info.splits['test'].num_examples // batch_size
    num_classes = ds_info.features['label'].num_classes

    dataset = ub.datasets.get(
        FLAGS.dataset, split=tfds.Split.TEST).load(batch_size=batch_size)
    test_datasets = {'clean': dataset}
    extra_kwargs = {}
    if FLAGS.dataset == 'cifar100':
        extra_kwargs['data_dir'] = FLAGS.cifar100_c_path
    corruption_types, _ = utils.load_corrupted_test_info(FLAGS.dataset)
    for corruption_type in corruption_types:
        for severity in range(1, 6):
            dataset = ub.datasets.get(
                f'{FLAGS.dataset}_corrupted',
                corruption_type=corruption_type,
                severity=severity,
                split=tfds.Split.TEST,
                **extra_kwargs).load(batch_size=batch_size)
            test_datasets[f'{corruption_type}_{severity}'] = dataset

    model = ub.models.wide_resnet_sngp(
        input_shape=ds_info.features['image'].shape,
        batch_size=FLAGS.per_core_batch_size,
        depth=28,
        width_multiplier=10,
        num_classes=num_classes,
        l2=0.,
        use_mc_dropout=FLAGS.use_mc_dropout,
        use_filterwise_dropout=FLAGS.use_filterwise_dropout,
        dropout_rate=FLAGS.dropout_rate,
        use_gp_layer=FLAGS.use_gp_layer,
        gp_input_dim=FLAGS.gp_input_dim,
        gp_hidden_dim=FLAGS.gp_hidden_dim,
        gp_scale=FLAGS.gp_scale,
        gp_bias=FLAGS.gp_bias,
        gp_input_normalization=FLAGS.gp_input_normalization,
        gp_random_feature_type=FLAGS.gp_random_feature_type,
        gp_cov_discount_factor=FLAGS.gp_cov_discount_factor,
        gp_cov_ridge_penalty=FLAGS.gp_cov_ridge_penalty,
        use_spec_norm=FLAGS.use_spec_norm,
        spec_norm_iteration=FLAGS.spec_norm_iteration,
        spec_norm_bound=FLAGS.spec_norm_bound)
    logging.info('Model input shape: %s', model.input_shape)
    logging.info('Model output shape: %s', model.output_shape)
    logging.info('Model number of weights: %s', model.count_params())

    # Search for checkpoints from their index file; then remove the index suffix.
    ensemble_filenames = tf.io.gfile.glob(
        os.path.join(FLAGS.checkpoint_dir, '**/*.index'))
    ensemble_filenames = [filename[:-6] for filename in ensemble_filenames]
    ensemble_size = len(ensemble_filenames)
    logging.info('Ensemble size: %s', ensemble_size)
    logging.info('Ensemble number of weights: %s',
                 ensemble_size * model.count_params())
    logging.info('Ensemble filenames: %s', str(ensemble_filenames))
    checkpoint = tf.train.Checkpoint(model=model)

    # Write model predictions to files.
    num_datasets = len(test_datasets)
    for m, ensemble_filename in enumerate(ensemble_filenames):
        checkpoint.restore(ensemble_filename)
        for n, (name, test_dataset) in enumerate(test_datasets.items()):
            filename = '{dataset}_{member}.npy'.format(dataset=name, member=m)
            filename = os.path.join(FLAGS.output_dir, filename)
            if not tf.io.gfile.exists(filename):
                logits = []
                test_iterator = iter(test_dataset)
                for _ in range(steps_per_eval):
                    features = next(test_iterator)['features']  # pytype: disable=unsupported-operands
                    logits_member = model(features, training=False)
                    if isinstance(logits_member, tuple):
                        # If model returns a tuple of (logits, covmat), extract both
                        logits_member, covmat_member = logits_member
                    else:
                        covmat_member = tf.eye(FLAGS.per_core_batch_size)
                    logits_member = ed.layers.utils.mean_field_logits(
                        logits_member, covmat_member,
                        FLAGS.gp_mean_field_factor_ensemble)
                    logits.append(logits_member)

                logits = tf.concat(logits, axis=0)
                with tf.io.gfile.GFile(filename, 'w') as f:
                    np.save(f, logits.numpy())
            percent = (m * num_datasets +
                       (n + 1)) / (ensemble_size * num_datasets)
            message = (
                '{:.1%} completion for prediction: ensemble member {:d}/{:d}. '
                'Dataset {:d}/{:d}'.format(percent, m + 1, ensemble_size,
                                           n + 1, num_datasets))
            logging.info(message)

    metrics = {
        'test/negative_log_likelihood': tf.keras.metrics.Mean(),
        'test/gibbs_cross_entropy': tf.keras.metrics.Mean(),
        'test/accuracy': tf.keras.metrics.SparseCategoricalAccuracy(),
        'test/ece': um.ExpectedCalibrationError(num_bins=FLAGS.num_bins),
    }
    corrupt_metrics = {}
    for name in test_datasets:
        corrupt_metrics['test/nll_{}'.format(name)] = tf.keras.metrics.Mean()
        corrupt_metrics['test/accuracy_{}'.format(name)] = (
            tf.keras.metrics.SparseCategoricalAccuracy())
        corrupt_metrics['test/ece_{}'.format(name)] = (
            um.ExpectedCalibrationError(num_bins=FLAGS.num_bins))

    # Evaluate model predictions.
    for n, (name, test_dataset) in enumerate(test_datasets.items()):
        logits_dataset = []
        for m in range(ensemble_size):
            filename = '{dataset}_{member}.npy'.format(dataset=name, member=m)
            filename = os.path.join(FLAGS.output_dir, filename)
            with tf.io.gfile.GFile(filename, 'rb') as f:
                logits_dataset.append(np.load(f))

        logits_dataset = tf.convert_to_tensor(logits_dataset)
        test_iterator = iter(test_dataset)
        for step in range(steps_per_eval):
            labels = next(test_iterator)['labels']  # pytype: disable=unsupported-operands
            logits = logits_dataset[:, (step * batch_size):((step + 1) *
                                                            batch_size)]
            labels = tf.cast(labels, tf.int32)
            negative_log_likelihood = um.ensemble_cross_entropy(labels, logits)
            per_probs = tf.nn.softmax(logits)
            probs = tf.reduce_mean(per_probs, axis=0)
            if name == 'clean':
                gibbs_ce = um.gibbs_cross_entropy(labels, logits)
                metrics['test/negative_log_likelihood'].update_state(
                    negative_log_likelihood)
                metrics['test/gibbs_cross_entropy'].update_state(gibbs_ce)
                metrics['test/accuracy'].update_state(labels, probs)
                metrics['test/ece'].update_state(labels, probs)
            else:
                corrupt_metrics['test/nll_{}'.format(name)].update_state(
                    negative_log_likelihood)
                corrupt_metrics['test/accuracy_{}'.format(name)].update_state(
                    labels, probs)
                corrupt_metrics['test/ece_{}'.format(name)].update_state(
                    labels, probs)

        message = (
            '{:.1%} completion for evaluation: dataset {:d}/{:d}'.format(
                (n + 1) / num_datasets, n + 1, num_datasets))
        logging.info(message)

    corrupt_results = utils.aggregate_corrupt_metrics(corrupt_metrics,
                                                      corruption_types)
    total_results = {name: metric.result() for name, metric in metrics.items()}
    total_results.update(corrupt_results)
    logging.info('Metrics: %s', total_results)
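
The ensemble evaluation above tempers each member's logits with ed.layers.utils.mean_field_logits before averaging. A sketch of that mean-field adjustment, assuming it follows the SNGP paper's logits / sqrt(1 + lambda * var) rule; the library function covers additional cases, so treat this as illustrative only:

import tensorflow as tf

def mean_field_logits_sketch(logits, covmat, mean_field_factor=1.0):
    """Scale logits by the GP posterior variance (mean-field approximation).

    Illustrative sketch of logits / sqrt(1 + lambda * var), with var taken
    from the diagonal of the covariance matrix.
    """
    variances = tf.linalg.diag_part(covmat)  # shape (batch_size,)
    scale = tf.sqrt(1.0 + mean_field_factor * variances)
    return logits / scale[:, tf.newaxis]

# Hypothetical usage: with an identity covariance the adjustment reduces to
# a constant rescaling of every logit.
adjusted = mean_field_logits_sketch(
    tf.random.normal([8, 10]), tf.eye(8), mean_field_factor=0.5)
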
Example #11
0
def main(argv):
  del argv  # unused arg
  tf.io.gfile.makedirs(FLAGS.output_dir)
  logging.info('Saving checkpoints at %s', FLAGS.output_dir)
  tf.random.set_seed(FLAGS.seed)
  # Split the seed into a 2-tuple, for passing into dataset builder.
  dataset_seed = (FLAGS.seed, FLAGS.seed + 1)

  data_dir = FLAGS.data_dir
  if FLAGS.use_gpu:
    logging.info('Use GPU')
    strategy = tf.distribute.MirroredStrategy()
  else:
    logging.info('Use TPU at %s',
                 FLAGS.tpu if FLAGS.tpu is not None else 'local')
    resolver = tf.distribute.cluster_resolver.TPUClusterResolver(tpu=FLAGS.tpu)
    tf.config.experimental_connect_to_cluster(resolver)
    tf.tpu.experimental.initialize_tpu_system(resolver)
    strategy = tf.distribute.TPUStrategy(resolver)

  batch_size = FLAGS.total_batch_size // FLAGS.num_dropout_samples_training
  test_batch_size = FLAGS.total_batch_size
  num_classes = 10 if FLAGS.dataset == 'cifar10' else 100

  aug_params = {
      'augmix': FLAGS.augmix,
      'aug_count': FLAGS.aug_count,
      'augmix_depth': FLAGS.augmix_depth,
      'augmix_prob_coeff': FLAGS.augmix_prob_coeff,
      'augmix_width': FLAGS.augmix_width,
      'ensemble_size': 1,
      'mixup_alpha': FLAGS.mixup_alpha,
  }
  validation_proportion = 1. - FLAGS.train_proportion
  use_validation_set = validation_proportion > 0.
  if FLAGS.dataset == 'cifar10':
    dataset_builder_class = ub.datasets.Cifar10Dataset
  else:
    dataset_builder_class = ub.datasets.Cifar100Dataset
  train_dataset_builder = dataset_builder_class(
      data_dir=data_dir,
      download_data=FLAGS.download_data,
      split=tfds.Split.TRAIN,
      use_bfloat16=FLAGS.use_bfloat16,
      aug_params=aug_params,
      validation_percent=validation_proportion,
      shuffle_buffer_size=FLAGS.shuffle_buffer_size,
      seed=dataset_seed)
  train_dataset = train_dataset_builder.load(batch_size=batch_size)
  if validation_proportion > 0.:
    validation_dataset_builder = dataset_builder_class(
        data_dir=data_dir,
        download_data=FLAGS.download_data,
        split=tfds.Split.VALIDATION,
        use_bfloat16=FLAGS.use_bfloat16,
        validation_percent=validation_proportion,
        drop_remainder=FLAGS.drop_remainder_for_eval)
    validation_dataset = validation_dataset_builder.load(
        batch_size=test_batch_size)
    validation_dataset = strategy.experimental_distribute_dataset(
        validation_dataset)
    val_sample_size = validation_dataset_builder.num_examples
    steps_per_val = val_sample_size // test_batch_size
  clean_test_dataset_builder = dataset_builder_class(
      data_dir=data_dir,
      download_data=FLAGS.download_data,
      split=tfds.Split.TEST,
      use_bfloat16=FLAGS.use_bfloat16,
      drop_remainder=FLAGS.drop_remainder_for_eval)
  clean_test_dataset = clean_test_dataset_builder.load(
      batch_size=test_batch_size)

  steps_per_epoch = train_dataset_builder.num_examples // batch_size
  steps_per_eval = clean_test_dataset_builder.num_examples // test_batch_size
  train_dataset = strategy.experimental_distribute_dataset(train_dataset)
  test_datasets = {
      'clean': strategy.experimental_distribute_dataset(clean_test_dataset),
  }

  if FLAGS.eval_on_ood:
    ood_dataset_names = FLAGS.ood_dataset
    ood_ds, steps_per_ood = ood_utils.load_ood_datasets(
        ood_dataset_names,
        clean_test_dataset_builder,
        validation_proportion,
        test_batch_size,
        drop_remainder=FLAGS.drop_remainder_for_eval)
    ood_datasets = {
        name: strategy.experimental_distribute_dataset(ds)
        for name, ds in ood_ds.items()
    }

  if FLAGS.corruptions_interval > 0:
    if FLAGS.dataset == 'cifar100':
      data_dir = FLAGS.cifar100_c_path
    corruption_types, _ = utils.load_corrupted_test_info(FLAGS.dataset)
    for corruption_type in corruption_types:
      for severity in range(1, 6):
        dataset = ub.datasets.get(
            f'{FLAGS.dataset}_corrupted',
            corruption_type=corruption_type,
            severity=severity,
            split=tfds.Split.TEST,
            data_dir=data_dir,
            drop_remainder=FLAGS.drop_remainder_for_eval).load(
                batch_size=batch_size)
        test_datasets[f'{corruption_type}_{severity}'] = (
            strategy.experimental_distribute_dataset(dataset))

  if FLAGS.use_bfloat16:
    tf.keras.mixed_precision.set_global_policy('mixed_bfloat16')

  summary_writer = tf.summary.create_file_writer(
      os.path.join(FLAGS.output_dir, 'summaries'))

  with strategy.scope():
    logging.info('Building ResNet model')
    if FLAGS.use_spec_norm:
      logging.info('Use Spectral Normalization with norm bound %.2f',
                   FLAGS.spec_norm_bound)
    if FLAGS.use_gp_layer:
      logging.info('Use GP layer with hidden units %d', FLAGS.gp_hidden_dim)

    model = ub.models.wide_resnet_sngp(
        input_shape=(32, 32, 3),
        batch_size=batch_size,
        depth=28,
        width_multiplier=10,
        num_classes=num_classes,
        l2=FLAGS.l2,
        use_mc_dropout=FLAGS.use_mc_dropout,
        use_filterwise_dropout=FLAGS.use_filterwise_dropout,
        dropout_rate=FLAGS.dropout_rate,
        use_gp_layer=FLAGS.use_gp_layer,
        gp_input_dim=FLAGS.gp_input_dim,
        gp_hidden_dim=FLAGS.gp_hidden_dim,
        gp_scale=FLAGS.gp_scale,
        gp_bias=FLAGS.gp_bias,
        gp_input_normalization=FLAGS.gp_input_normalization,
        gp_random_feature_type=FLAGS.gp_random_feature_type,
        gp_cov_discount_factor=FLAGS.gp_cov_discount_factor,
        gp_cov_ridge_penalty=FLAGS.gp_cov_ridge_penalty,
        use_spec_norm=FLAGS.use_spec_norm,
        spec_norm_iteration=FLAGS.spec_norm_iteration,
        spec_norm_bound=FLAGS.spec_norm_bound)
    logging.info('Model input shape: %s', model.input_shape)
    logging.info('Model output shape: %s', model.output_shape)
    logging.info('Model number of weights: %s', model.count_params())
    # Linearly scale learning rate and the decay epochs by vanilla settings.
    base_lr = FLAGS.base_learning_rate * batch_size / 128
    lr_decay_epochs = [(int(start_epoch_str) * FLAGS.train_epochs) // 200
                       for start_epoch_str in FLAGS.lr_decay_epochs]
    lr_schedule = ub.schedules.WarmUpPiecewiseConstantSchedule(
        steps_per_epoch,
        base_lr,
        decay_ratio=FLAGS.lr_decay_ratio,
        decay_epochs=lr_decay_epochs,
        warmup_epochs=FLAGS.lr_warmup_epochs)
    optimizer = tf.keras.optimizers.SGD(lr_schedule,
                                        momentum=1.0 - FLAGS.one_minus_momentum,
                                        nesterov=True)
    metrics = {
        'train/negative_log_likelihood': tf.keras.metrics.Mean(),
        'train/accuracy': tf.keras.metrics.SparseCategoricalAccuracy(),
        'train/loss': tf.keras.metrics.Mean(),
        'train/ece': rm.metrics.ExpectedCalibrationError(
            num_bins=FLAGS.num_bins),
        'test/negative_log_likelihood': tf.keras.metrics.Mean(),
        'test/accuracy': tf.keras.metrics.SparseCategoricalAccuracy(),
        'test/ece': rm.metrics.ExpectedCalibrationError(
            num_bins=FLAGS.num_bins),
        'test/stddev': tf.keras.metrics.Mean(),
    }
    if use_validation_set:
      metrics.update({
          'val/negative_log_likelihood': tf.keras.metrics.Mean(),
          'val/accuracy': tf.keras.metrics.SparseCategoricalAccuracy(),
          'val/ece': rm.metrics.ExpectedCalibrationError(
              num_bins=FLAGS.num_bins),
          'val/stddev': tf.keras.metrics.Mean(),
      })
    if FLAGS.eval_on_ood:
      ood_metrics = ood_utils.create_ood_metrics(
          ood_dataset_names, tpr_list=FLAGS.ood_tpr_threshold)
      metrics.update(ood_metrics)
    if FLAGS.corruptions_interval > 0:
      corrupt_metrics = {}
      for intensity in range(1, 6):
        for corruption in corruption_types:
          dataset_name = '{0}_{1}'.format(corruption, intensity)
          corrupt_metrics['test/nll_{}'.format(dataset_name)] = (
              tf.keras.metrics.Mean())
          corrupt_metrics['test/accuracy_{}'.format(dataset_name)] = (
              tf.keras.metrics.SparseCategoricalAccuracy())
          corrupt_metrics['test/ece_{}'.format(dataset_name)] = (
              rm.metrics.ExpectedCalibrationError(num_bins=FLAGS.num_bins))
          corrupt_metrics['test/stddev_{}'.format(dataset_name)] = (
              tf.keras.metrics.Mean())

    checkpoint = tf.train.Checkpoint(model=model, optimizer=optimizer)
    latest_checkpoint = tf.train.latest_checkpoint(FLAGS.output_dir)
    initial_epoch = 0
    logging.info('Output dir : %s', FLAGS.output_dir)
    if latest_checkpoint:
      # checkpoint.restore must be within a strategy.scope() so that optimizer
      # slot variables are mirrored.
      checkpoint.restore(latest_checkpoint)
      logging.info('Loaded checkpoint %s', latest_checkpoint)
      initial_epoch = optimizer.iterations.numpy() // steps_per_epoch
    if FLAGS.saved_model_dir:
      logging.info('Saved model dir : %s', FLAGS.saved_model_dir)
      latest_checkpoint = tf.train.latest_checkpoint(FLAGS.saved_model_dir)
      checkpoint.restore(latest_checkpoint)
      logging.info('Loaded checkpoint %s', latest_checkpoint)
    if FLAGS.eval_only:
      initial_epoch = FLAGS.train_epochs - 1  # Run just one epoch of eval

  @tf.function
  def train_step(iterator, step):
    """Training StepFn."""

    def step_fn(inputs, step):
      """Per-Replica StepFn."""
      images = inputs['features']
      labels = inputs['labels']

      if tf.equal(step, 0) and FLAGS.gp_cov_discount_factor < 0:
        # Reset the covariance estimator at the beginning of a new epoch.
        if FLAGS.use_gp_layer:
          model.layers[-1].reset_covariance_matrix()

      if FLAGS.augmix and FLAGS.aug_count >= 1:
        # Index 0 at augmix preprocessing is the unperturbed image.
        images = images[:, 1, ...]
        # This is for the case of combining AugMix and Mixup.
        if FLAGS.mixup_alpha > 0:
          labels = tf.split(labels, FLAGS.aug_count + 1, axis=0)[1]
      images = tf.tile(images, [FLAGS.num_dropout_samples_training, 1, 1, 1])
      if FLAGS.mixup_alpha > 0:
        labels = tf.tile(labels, [FLAGS.num_dropout_samples_training, 1])
      else:
        labels = tf.tile(labels, [FLAGS.num_dropout_samples_training])

      with tf.GradientTape() as tape:
        logits = model(images, training=True)
        if isinstance(logits, (list, tuple)):
          # If model returns a tuple of (logits, covmat), extract logits
          logits, _ = logits
        if FLAGS.use_bfloat16:
          logits = tf.cast(logits, tf.float32)
        if FLAGS.mixup_alpha > 0:
          negative_log_likelihood = tf.reduce_mean(
              tf.keras.losses.categorical_crossentropy(labels,
                                                       logits,
                                                       from_logits=True))
        else:
          negative_log_likelihood = tf.reduce_mean(
              tf.keras.losses.sparse_categorical_crossentropy(labels,
                                                              logits,
                                                              from_logits=True))

        l2_loss = sum(model.losses)
        loss = negative_log_likelihood + l2_loss
        # Scale the loss, given that TPUStrategy will reduce-sum the gradients
        # across replicas.
        scaled_loss = loss / strategy.num_replicas_in_sync

      grads = tape.gradient(scaled_loss, model.trainable_variables)
      optimizer.apply_gradients(zip(grads, model.trainable_variables))

      probs = tf.nn.softmax(logits)
      if FLAGS.mixup_alpha > 0:
        labels = tf.argmax(labels, axis=-1)
      metrics['train/ece'].add_batch(probs, label=labels)
      metrics['train/loss'].update_state(loss)
      metrics['train/negative_log_likelihood'].update_state(
          negative_log_likelihood)
      metrics['train/accuracy'].update_state(labels, logits)

    strategy.run(step_fn, args=(next(iterator), step))

  @tf.function
  def test_step(iterator, dataset_name, num_steps):
    """Evaluation StepFn."""
    def step_fn(inputs):
      """Per-Replica StepFn."""
      images = inputs['features']
      labels = inputs['labels']

      logits_list = []
      stddev_list = []
      for _ in range(FLAGS.num_dropout_samples):
        logits = model(images, training=False)
        if isinstance(logits, (list, tuple)):
          # If model returns a tuple of (logits, covmat), extract both
          logits, covmat = logits
          if FLAGS.use_bfloat16:
            logits = tf.cast(logits, tf.float32)
          logits = ed.layers.utils.mean_field_logits(
              logits, covmat, mean_field_factor=FLAGS.gp_mean_field_factor)
        else:
          covmat = tf.eye(logits.shape[0])
          if FLAGS.use_bfloat16:
            logits = tf.cast(logits, tf.float32)
        stddev = tf.sqrt(tf.linalg.diag_part(covmat))

        stddev_list.append(stddev)
        logits_list.append(logits)

      # Logits dimension is (num_samples, batch_size, num_classes).
      logits_list = tf.stack(logits_list, axis=0)
      stddev_list = tf.stack(stddev_list, axis=0)

      stddev = tf.reduce_mean(stddev_list, axis=0)
      probs_list = tf.nn.softmax(logits_list)
      probs = tf.reduce_mean(probs_list, axis=0)
      logits = tf.reduce_mean(logits_list, axis=0)

      labels_broadcasted = tf.broadcast_to(
          labels, [FLAGS.num_dropout_samples,
                   tf.shape(labels)[0]])
      log_likelihoods = -tf.keras.losses.sparse_categorical_crossentropy(
          labels_broadcasted, logits_list, from_logits=True)
      negative_log_likelihood = tf.reduce_mean(
          -tf.reduce_logsumexp(log_likelihoods, axis=[0]) +
          tf.math.log(float(FLAGS.num_dropout_samples)))

      logging.info('Dataset name : %s', dataset_name)
      if dataset_name == 'clean':
        metrics['test/negative_log_likelihood'].update_state(
            negative_log_likelihood)
        metrics['test/accuracy'].update_state(labels, probs)
        metrics['test/ece'].add_batch(probs, label=labels)
        metrics['test/stddev'].update_state(stddev)
      elif dataset_name == 'val':
        metrics['val/negative_log_likelihood'].update_state(
            negative_log_likelihood)
        metrics['val/accuracy'].update_state(labels, probs)
        metrics['val/ece'].add_batch(probs, label=labels)
        metrics['val/stddev'].update_state(stddev)
      elif dataset_name.startswith('ood/'):
        ood_labels = 1 - inputs['is_in_distribution']
        if FLAGS.dempster_shafer_ood:
          ood_scores = ood_utils.DempsterShaferUncertainty(logits)
        else:
          ood_scores = 1 - tf.reduce_max(probs, axis=-1)

        # Edge case: dataset_name may contain underscores, so match the OOD
        # metrics by substring.
        for name, metric in metrics.items():
          if dataset_name in name:
            metric.update_state(ood_labels, ood_scores)
      elif FLAGS.corruptions_interval > 0:
        corrupt_metrics['test/nll_{}'.format(dataset_name)].update_state(
            negative_log_likelihood)
        corrupt_metrics['test/accuracy_{}'.format(dataset_name)].update_state(
            labels, probs)
        corrupt_metrics['test/ece_{}'.format(dataset_name)].add_batch(
            probs, label=labels)
        corrupt_metrics['test/stddev_{}'.format(dataset_name)].update_state(
            stddev)

    for _ in tf.range(tf.cast(num_steps, tf.int32)):
      strategy.run(step_fn, args=(next(iterator),))

  metrics.update({'test/ms_per_example': tf.keras.metrics.Mean()})

  step_variable = tf.Variable(0, dtype=tf.int32)
  train_iterator = iter(train_dataset)
  start_time = time.time()

  for epoch in range(initial_epoch, FLAGS.train_epochs):
    logging.info('Starting to run epoch: %s', epoch)
    if not FLAGS.eval_only:
      for step in range(steps_per_epoch):
        step_variable.assign(step)
        # Pass `step` as a tf.Variable so the tf.function-decorated
        # train_step() does not retrace at each call.
        train_step(train_iterator, step_variable)

        current_step = epoch * steps_per_epoch + (step + 1)
        max_steps = steps_per_epoch * FLAGS.train_epochs
        time_elapsed = time.time() - start_time
        steps_per_sec = float(current_step) / time_elapsed
        eta_seconds = (max_steps - current_step) / steps_per_sec
        message = ('{:.1%} completion: epoch {:d}/{:d}. {:.1f} steps/s. '
                   'ETA: {:.0f} min. Time elapsed: {:.0f} min'.format(
                       current_step / max_steps,
                       epoch + 1,
                       FLAGS.train_epochs,
                       steps_per_sec,
                       eta_seconds / 60,
                       time_elapsed / 60))
        if step % 20 == 0:
          logging.info(message)

    datasets_to_evaluate = {'clean': test_datasets['clean']}
    if use_validation_set:
      datasets_to_evaluate['val'] = validation_dataset
    if (FLAGS.corruptions_interval > 0 and
        (epoch + 1) % FLAGS.corruptions_interval == 0):
      datasets_to_evaluate = test_datasets
    for dataset_name, test_dataset in datasets_to_evaluate.items():
      test_iterator = iter(test_dataset)
      logging.info('Testing on dataset %s', dataset_name)
      # Use a separate variable so the test split's step count is not
      # permanently overwritten by the validation split's.
      num_eval_steps = steps_per_val if dataset_name == 'val' else steps_per_eval
      logging.info('Starting to run eval at epoch: %s', epoch)
      test_start_time = time.time()
      test_step(test_iterator, dataset_name, num_eval_steps)
      ms_per_example = (time.time() - test_start_time) * 1e6 / batch_size
      metrics['test/ms_per_example'].update_state(ms_per_example)

      logging.info('Done with testing on %s', dataset_name)

    if FLAGS.eval_on_ood:
      for ood_dataset_name, ood_dataset in ood_datasets.items():
        ood_iterator = iter(ood_dataset)
        logging.info('Calculating OOD on dataset %s', ood_dataset_name)
        logging.info('Running OOD eval at epoch: %s', epoch)
        test_step(ood_iterator, ood_dataset_name,
                  steps_per_ood[ood_dataset_name])

        logging.info('Done with OOD eval on %s', ood_dataset_name)

    corrupt_results = {}
    if (FLAGS.corruptions_interval > 0 and
        (epoch + 1) % FLAGS.corruptions_interval == 0):
      corrupt_results = utils.aggregate_corrupt_metrics(corrupt_metrics,
                                                        corruption_types)

    logging.info('Train Loss: %.4f, Accuracy: %.2f%%',
                 metrics['train/loss'].result(),
                 metrics['train/accuracy'].result() * 100)
    if use_validation_set:
      logging.info('Val NLL: %.4f, Accuracy: %.2f%%',
                   metrics['val/negative_log_likelihood'].result(),
                   metrics['val/accuracy'].result() * 100)
    logging.info('Test NLL: %.4f, Accuracy: %.2f%%',
                 metrics['test/negative_log_likelihood'].result(),
                 metrics['test/accuracy'].result() * 100)
    total_results = {name: metric.result() for name, metric in metrics.items()}
    total_results.update(corrupt_results)
    # Metrics from Robustness Metrics (like ECE) will return a dict with a
    # single key/value, instead of a scalar.
    total_results = {
        k: (list(v.values())[0] if isinstance(v, dict) else v)
        for k, v in total_results.items()
    }
    with summary_writer.as_default():
      for name, result in total_results.items():
        tf.summary.scalar(name, result, step=epoch + 1)

    for metric in metrics.values():
      metric.reset_states()

    if FLAGS.corruptions_interval > 0:
      for metric in corrupt_metrics.values():
        metric.reset_states()

    if (FLAGS.checkpoint_interval > 0 and
        (epoch + 1) % FLAGS.checkpoint_interval == 0):
      checkpoint_name = checkpoint.save(
          os.path.join(FLAGS.output_dir, 'checkpoint'))
      logging.info('Saved checkpoint to %s', checkpoint_name)

  final_checkpoint_name = checkpoint.save(
      os.path.join(FLAGS.output_dir, 'checkpoint'))
  logging.info('Saved last checkpoint to %s', final_checkpoint_name)

  final_save_name = os.path.join(FLAGS.output_dir, 'model')
  model.save(final_save_name)
  logging.info('Saved model to %s', final_save_name)
  with summary_writer.as_default():
    hp.hparams({
        'base_learning_rate': FLAGS.base_learning_rate,
        'one_minus_momentum': FLAGS.one_minus_momentum,
        'l2': FLAGS.l2,
        'gp_mean_field_factor': FLAGS.gp_mean_field_factor,
    })
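
Example #11's learning rate comes from ub.schedules.WarmUpPiecewiseConstantSchedule. A simplified re-implementation for illustration only, under the assumption that it matches the library's linear-warmup-then-step-decay behavior:

import tensorflow as tf

class WarmUpPiecewiseConstantSketch(
    tf.keras.optimizers.schedules.LearningRateSchedule):
  """Linear warmup to base_lr, then multiply by decay_ratio at each entry
  of decay_epochs. Simplified; the library class may differ in edge cases."""

  def __init__(self, steps_per_epoch, base_lr, decay_ratio, decay_epochs,
               warmup_epochs):
    super().__init__()
    self.steps_per_epoch = steps_per_epoch
    self.base_lr = base_lr
    self.decay_ratio = decay_ratio
    self.decay_epochs = decay_epochs
    self.warmup_epochs = warmup_epochs

  def __call__(self, step):
    epoch = tf.cast(step, tf.float32) / float(self.steps_per_epoch)
    lr = tf.constant(self.base_lr, tf.float32)
    for decay_epoch in self.decay_epochs:
      # Each decay boundary that has passed multiplies the rate once more.
      lr = tf.where(epoch >= decay_epoch, lr * self.decay_ratio, lr)
    warmup_lr = self.base_lr * epoch / float(self.warmup_epochs)
    return tf.where(epoch < self.warmup_epochs, warmup_lr, lr)
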
Example #12
0
def main(argv):
    del argv  # unused arg
    if FLAGS.num_cores > 1:
        raise ValueError('Only a single accelerator is currently supported.')
    tf.enable_v2_behavior()
    tf.random.set_seed(FLAGS.seed)

    dataset_test = utils.ImageNetInput(is_training=False,
                                       data_dir=FLAGS.data_dir,
                                       batch_size=FLAGS.per_core_batch_size,
                                       use_bfloat16=False).input_fn()
    test_datasets = {'clean': dataset_test}

    model = deterministic_model.resnet50(input_shape=(224, 224, 3),
                                         num_classes=NUM_CLASSES)

    logging.info('Model input shape: %s', model.input_shape)
    logging.info('Model output shape: %s', model.output_shape)
    logging.info('Model number of weights: %s', model.count_params())
    # Search for checkpoints from their index file; then remove the index suffix.
    ensemble_filenames = tf.io.gfile.glob(
        os.path.join(FLAGS.output_dir, '**/*.index'))
    ensemble_filenames = [filename[:-6] for filename in ensemble_filenames]
    ensemble_size = len(ensemble_filenames)
    logging.info('Ensemble size: %s', ensemble_size)
    logging.info('Ensemble number of weights: %s',
                 ensemble_size * model.count_params())
    logging.info('Ensemble filenames: %s', str(ensemble_filenames))
    checkpoint = tf.train.Checkpoint(model=model)

    # Collect the logits output for each ensemble member and test data
    # point. We also collect the labels.

    logits_test = {'clean': []}
    labels_test = {'clean': []}
    corruption_types, max_intensity = utils.load_corrupted_test_info()
    for name in corruption_types:
        for intensity in range(1, max_intensity + 1):
            dataset_name = '{0}_{1}'.format(name, intensity)
            logits_test[dataset_name] = []
            labels_test[dataset_name] = []

            test_datasets[dataset_name] = utils.load_corrupted_test_dataset(
                name=name,
                intensity=intensity,
                batch_size=FLAGS.per_core_batch_size,
                drop_remainder=True,
                use_bfloat16=False)

    for m, ensemble_filename in enumerate(ensemble_filenames):
        checkpoint.restore(ensemble_filename)
        logging.info('Working on test data for ensemble member %s', m)
        for name, test_dataset in test_datasets.items():
            logits = []
            for features, labels in test_dataset:
                logits.append(model(features, training=False))
                if m == 0:
                    labels_test[name].append(labels)

            logits = tf.concat(logits, axis=0)
            logits_test[name].append(logits)
            if m == 0:
                labels_test[name] = tf.concat(labels_test[name], axis=0)
            logging.info('Finished testing on %s', name)

    metrics = {
        'test/ece':
        ed.metrics.ExpectedCalibrationError(num_classes=NUM_CLASSES,
                                            num_bins=15)
    }
    corrupt_metrics = {}
    for name in test_datasets:
        corrupt_metrics['test/ece_{}'.format(
            name)] = ed.metrics.ExpectedCalibrationError(
                num_classes=NUM_CLASSES, num_bins=15)
        corrupt_metrics['test/nll_{}'.format(name)] = tf.keras.metrics.Mean()
        corrupt_metrics['test/accuracy_{}'.format(
            name)] = tf.keras.metrics.Mean()

    for name, test_dataset in test_datasets.items():
        labels = labels_test[name]
        logits = logits_test[name]
        nll_test = ensemble_negative_log_likelihood(labels, logits)
        gibbs_ce_test = gibbs_cross_entropy(labels_test[name],
                                            logits_test[name])
        labels = tf.cast(labels, tf.int32)
        logits = tf.convert_to_tensor(logits)
        per_probs = tf.nn.softmax(logits)
        probs = tf.reduce_mean(per_probs, axis=0)
        accuracy = tf.keras.metrics.sparse_categorical_accuracy(labels, probs)
        if name == 'clean':
            metrics['test/negative_log_likelihood'] = tf.reduce_mean(nll_test)
            metrics['test/gibbs_cross_entropy'] = tf.reduce_mean(gibbs_ce_test)
            metrics['test/accuracy'] = tf.reduce_mean(accuracy)
            metrics['test/ece'].update_state(labels, probs)
        else:
            corrupt_metrics['test/nll_{}'.format(name)].update_state(
                tf.reduce_mean(nll_test))
            corrupt_metrics['test/accuracy_{}'.format(name)].update_state(
                tf.reduce_mean(accuracy))
            corrupt_metrics['test/ece_{}'.format(name)].update_state(
                labels, probs)

    corrupt_results = utils.aggregate_corrupt_metrics(corrupt_metrics,
                                                      corruption_types,
                                                      max_intensity)
    metrics['test/ece'] = metrics['test/ece'].result()
    total_results = {name: metric for name, metric in metrics.items()}
    total_results.update(corrupt_results)
    logging.info('Metrics: %s', total_results)
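
The evaluation above reports both an ensemble negative log-likelihood (the NLL of the averaged, i.e. mixture, predictive) and a Gibbs cross-entropy (the average of the per-member NLLs). A toy sketch (hypothetical member probabilities) of the distinction, including the Jensen's-inequality ordering between the two:

import tensorflow as tf

# Probabilities assigned to the true label by M=2 ensemble members, for a
# batch of 2 examples (toy values).
member_probs = tf.constant([[0.9, 0.2],
                            [0.5, 0.4]])

# Ensemble NLL: -log of the mean (mixture) predictive probability.
ensemble_nll = tf.reduce_mean(
    -tf.math.log(tf.reduce_mean(member_probs, axis=0)))

# Gibbs cross-entropy: mean of the individual members' NLLs.
gibbs_ce = tf.reduce_mean(-tf.math.log(member_probs))

# By Jensen's inequality the ensemble NLL never exceeds the Gibbs CE.
tf.debugging.assert_less_equal(ensemble_nll, gibbs_ce)
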
def main(argv):
    del argv  # unused arg
    tf.io.gfile.makedirs(FLAGS.output_dir)
    logging.info('Saving checkpoints at %s', FLAGS.output_dir)
    tf.random.set_seed(FLAGS.seed)

    ds_info = tfds.builder(FLAGS.dataset).info
    per_core_batch_size = FLAGS.per_core_batch_size // FLAGS.ensemble_size
    batch_size = per_core_batch_size * FLAGS.num_cores
    # train_proportion is a float, so steps_per_epoch must be cast back to int.
    steps_per_epoch = int(
        (ds_info.splits['train'].num_examples * FLAGS.train_proportion) //
        batch_size)
    steps_per_eval = ds_info.splits['test'].num_examples // batch_size
    num_classes = ds_info.features['label'].num_classes

    if FLAGS.use_gpu:
        logging.info('Use GPU')
        strategy = tf.distribute.MirroredStrategy()
    else:
        logging.info('Use TPU at %s',
                     FLAGS.tpu if FLAGS.tpu is not None else 'local')
        resolver = tf.distribute.cluster_resolver.TPUClusterResolver(
            tpu=FLAGS.tpu)
        tf.config.experimental_connect_to_cluster(resolver)
        tf.tpu.experimental.initialize_tpu_system(resolver)
        strategy = tf.distribute.TPUStrategy(resolver)

    train_dataset = ub.datasets.get(
        FLAGS.dataset,
        split=tfds.Split.TRAIN,
        validation_percent=1. -
        FLAGS.train_proportion).load(batch_size=batch_size)
    clean_test_dataset = ub.datasets.get(
        FLAGS.dataset, split=tfds.Split.TEST).load(batch_size=batch_size)
    train_dataset = strategy.experimental_distribute_dataset(train_dataset)
    test_datasets = {
        'clean': strategy.experimental_distribute_dataset(clean_test_dataset),
    }
    if FLAGS.corruptions_interval > 0:
        extra_kwargs = {}
        if FLAGS.dataset == 'cifar100':
            extra_kwargs['data_dir'] = FLAGS.cifar100_c_path
        corruption_types, _ = utils.load_corrupted_test_info(FLAGS.dataset)
        for corruption_type in corruption_types:
            for severity in range(1, 6):
                dataset = ub.datasets.get(
                    f'{FLAGS.dataset}_corrupted',
                    corruption_type=corruption_type,
                    severity=severity,
                    split=tfds.Split.TEST,
                    **extra_kwargs).load(batch_size=batch_size)
                test_datasets[f'{corruption_type}_{severity}'] = (
                    strategy.experimental_distribute_dataset(dataset))

    summary_writer = tf.summary.create_file_writer(
        os.path.join(FLAGS.output_dir, 'summaries'))

    with strategy.scope():
        logging.info('Building Keras model')
        model = ub.models.wide_resnet_sngp_be(
            input_shape=ds_info.features['image'].shape,
            batch_size=batch_size,
            depth=28,
            width_multiplier=10,
            num_classes=num_classes,
            ensemble_size=FLAGS.ensemble_size,
            random_sign_init=FLAGS.random_sign_init,
            l2=FLAGS.l2,
            use_gp_layer=FLAGS.use_gp_layer,
            gp_input_dim=FLAGS.gp_input_dim,
            gp_hidden_dim=FLAGS.gp_hidden_dim,
            gp_scale=FLAGS.gp_scale,
            gp_bias=FLAGS.gp_bias,
            gp_input_normalization=FLAGS.gp_input_normalization,
            gp_cov_discount_factor=FLAGS.gp_cov_discount_factor,
            gp_cov_ridge_penalty=FLAGS.gp_cov_ridge_penalty,
            use_spec_norm=FLAGS.use_spec_norm,
            spec_norm_iteration=FLAGS.spec_norm_iteration,
            spec_norm_bound=FLAGS.spec_norm_bound)

        logging.info('Model input shape: %s', model.input_shape)
        logging.info('Model output shape: %s', model.output_shape)
        logging.info('Model number of weights: %s', model.count_params())
        # Linearly scale the learning rate and the decay epochs from the
        # vanilla (batch-size-128) settings.
        base_lr = FLAGS.base_learning_rate * batch_size / 128
        lr_decay_epochs = [(int(start_epoch_str) * FLAGS.train_epochs) // 200
                           for start_epoch_str in FLAGS.lr_decay_epochs]
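        # Illustrative numbers (not this script's flag defaults): with
        # batch_size=512, base_lr = 4 * FLAGS.base_learning_rate, and with
        # lr_decay_epochs=['80', '160'] and train_epochs=200 the decays land
        # at epochs 80 and 160.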
        lr_schedule = utils.LearningRateSchedule(
            steps_per_epoch,
            base_lr,
            decay_ratio=FLAGS.lr_decay_ratio,
            decay_epochs=lr_decay_epochs,
            warmup_epochs=FLAGS.lr_warmup_epochs)
        optimizer = tf.keras.optimizers.SGD(lr_schedule,
                                            momentum=0.9,
                                            nesterov=True)
        metrics = {
            'train/negative_log_likelihood': tf.keras.metrics.Mean(),
            'train/accuracy': tf.keras.metrics.SparseCategoricalAccuracy(),
            'train/loss': tf.keras.metrics.Mean(),
            'train/ece': um.ExpectedCalibrationError(num_bins=FLAGS.num_bins),
            'test/negative_log_likelihood': tf.keras.metrics.Mean(),
            'test/accuracy': tf.keras.metrics.SparseCategoricalAccuracy(),
            'test/ece': um.ExpectedCalibrationError(num_bins=FLAGS.num_bins),
            'test/stddev': tf.keras.metrics.Mean(),
        }
        for i in range(FLAGS.ensemble_size):
            metrics['test/nll_member_{}'.format(i)] = tf.keras.metrics.Mean()
            metrics['test/accuracy_member_{}'.format(i)] = (
                tf.keras.metrics.SparseCategoricalAccuracy())
        if FLAGS.corruptions_interval > 0:
            corrupt_metrics = {}
            for intensity in range(1, 6):
                for corruption in corruption_types:
                    dataset_name = '{0}_{1}'.format(corruption, intensity)
                    corrupt_metrics['test/nll_{}'.format(dataset_name)] = (
                        tf.keras.metrics.Mean())
                    corrupt_metrics['test/accuracy_{}'.format(
                        dataset_name)] = (
                            tf.keras.metrics.SparseCategoricalAccuracy())
                    corrupt_metrics['test/ece_{}'.format(dataset_name)] = (
                        um.ExpectedCalibrationError(num_bins=FLAGS.num_bins))
                    corrupt_metrics['test/stddev_{}'.format(dataset_name)] = (
                        tf.keras.metrics.Mean())

        checkpoint = tf.train.Checkpoint(model=model, optimizer=optimizer)
        latest_checkpoint = tf.train.latest_checkpoint(FLAGS.output_dir)
        initial_epoch = 0
        if latest_checkpoint:
            # checkpoint.restore must be within a strategy.scope() so that optimizer
            # slot variables are mirrored.
            checkpoint.restore(latest_checkpoint)
            logging.info('Loaded checkpoint %s', latest_checkpoint)
            initial_epoch = optimizer.iterations.numpy() // steps_per_epoch

    @tf.function
    def train_step(iterator):
        """Training StepFn."""
        def step_fn(inputs):
            """Per-Replica StepFn."""
            images = inputs['features']
            labels = inputs['labels']
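            # BatchEnsemble evaluates all members in one forward pass: tile
            # the batch ensemble_size times, giving logits of shape
            # [ensemble_size * per_replica_batch, num_classes].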
            images = tf.tile(images, [FLAGS.ensemble_size, 1, 1, 1])
            labels = tf.tile(labels, [FLAGS.ensemble_size])

            with tf.GradientTape() as tape:
                logits = model(images, training=True)
                if isinstance(logits, tuple):
                    # If the model returns (logits, covmat), keep only the logits.
                    logits, _ = logits
                negative_log_likelihood = tf.reduce_mean(
                    tf.keras.losses.sparse_categorical_crossentropy(
                        labels, logits, from_logits=True))
                l2_loss = sum(model.losses)
                loss = negative_log_likelihood + l2_loss
                # Scale the loss, since TPUStrategy sum-reduces gradients
                # across all replicas.
                scaled_loss = loss / strategy.num_replicas_in_sync

            grads = tape.gradient(scaled_loss, model.trainable_variables)
            # Separate learning rate implementation.
            if FLAGS.fast_weight_lr_multiplier != 1.0:
                grads_and_vars = []
                for grad, var in zip(grads, model.trainable_variables):
                    # Apply a different learning rate to the fast-weight
                    # approximate posterior/prior parameters. This excludes BN
                    # and slow weights, but be careful with the naming scheme.
                    if ('batch_norm' not in var.name
                            and 'kernel' not in var.name):
                        grads_and_vars.append(
                            (grad * FLAGS.fast_weight_lr_multiplier, var))
                    else:
                        grads_and_vars.append((grad, var))
                optimizer.apply_gradients(grads_and_vars)
            else:
                optimizer.apply_gradients(zip(grads,
                                              model.trainable_variables))

            probs = tf.nn.softmax(logits)
            metrics['train/ece'].update_state(labels, probs)
            metrics['train/loss'].update_state(loss)
            metrics['train/negative_log_likelihood'].update_state(
                negative_log_likelihood)
            metrics['train/accuracy'].update_state(labels, logits)

        strategy.run(step_fn, args=(next(iterator), ))

    @tf.function
    def test_step(iterator, dataset_name):
        """Evaluation StepFn."""
        def step_fn(inputs):
            """Per-Replica StepFn."""
            images = inputs['features']
            labels = inputs['labels']

            logits_list = []
            stddev_list = []

            for i in range(FLAGS.ensemble_size):
                logits = model(images, training=False)
                if isinstance(logits, tuple):
                    # If the model returns (logits, covmat), unpack both.
                    logits, covmat = logits
                else:
                    covmat = tf.eye(FLAGS.per_core_batch_size)
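                # Mean-field adjustment (a sketch of the usual SNGP rule):
                # divide the logits by sqrt(1 + mean_field_factor * var),
                # with var = diag(covmat), so that softmax(logits) better
                # approximates the posterior predictive mean.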
                logits = mean_field_logits(
                    logits,
                    covmat,
                    mean_field_factor=FLAGS.gp_mean_field_factor)
                stddev = tf.sqrt(tf.linalg.diag_part(covmat))

                stddev_list.append(stddev)
                logits_list.append(logits)

                member_probs = tf.nn.softmax(logits)
                member_loss = tf.keras.losses.sparse_categorical_crossentropy(
                    labels, member_probs)
                metrics['test/nll_member_{}'.format(i)].update_state(
                    member_loss)
                metrics['test/accuracy_member_{}'.format(i)].update_state(
                    labels, member_probs)
            # Logits dimension is (num_samples, batch_size, num_classes).
            logits_list = tf.stack(logits_list, axis=0)
            stddev_list = tf.stack(stddev_list, axis=0)

            stddev = tf.reduce_mean(stddev_list, axis=0)
            probs_list = tf.nn.softmax(logits_list)
            probs = tf.reduce_mean(probs_list, axis=0)

            labels_broadcasted = tf.broadcast_to(
                labels, [FLAGS.ensemble_size, labels.shape[0]])
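            # Stable ensemble NLL: -log((1/M) * sum_m p_m(y|x))
            # = -logsumexp_m log p_m(y|x) + log M.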
            log_likelihoods = -tf.keras.losses.sparse_categorical_crossentropy(
                labels_broadcasted, logits_list, from_logits=True)
            negative_log_likelihood = tf.reduce_mean(
                -tf.reduce_logsumexp(log_likelihoods, axis=[0]) +
                tf.math.log(float(FLAGS.ensemble_size)))

            if dataset_name == 'clean':
                metrics['test/negative_log_likelihood'].update_state(
                    negative_log_likelihood)
                metrics['test/accuracy'].update_state(labels, probs)
                metrics['test/ece'].update_state(labels, probs)
                metrics['test/stddev'].update_state(stddev)
            else:
                corrupt_metrics['test/nll_{}'.format(
                    dataset_name)].update_state(negative_log_likelihood)
                corrupt_metrics['test/accuracy_{}'.format(
                    dataset_name)].update_state(labels, probs)
                corrupt_metrics['test/ece_{}'.format(
                    dataset_name)].update_state(labels, probs)
                corrupt_metrics['test/stddev_{}'.format(
                    dataset_name)].update_state(stddev)

        strategy.run(step_fn, args=(next(iterator), ))

    metrics.update({'test/ms_per_example': tf.keras.metrics.Mean()})

    train_iterator = iter(train_dataset)
    start_time = time.time()
    for epoch in range(initial_epoch, FLAGS.train_epochs):
        logging.info('Starting to run epoch: %s', epoch)
        for step in range(steps_per_epoch):
            train_step(train_iterator)

            current_step = epoch * steps_per_epoch + (step + 1)
            max_steps = steps_per_epoch * FLAGS.train_epochs
            time_elapsed = time.time() - start_time
            steps_per_sec = float(current_step) / time_elapsed
            eta_seconds = (max_steps - current_step) / steps_per_sec
            message = ('{:.1%} completion: epoch {:d}/{:d}. {:.1f} steps/s. '
                       'ETA: {:.0f} min. Time elapsed: {:.0f} min'.format(
                           current_step / max_steps, epoch + 1,
                           FLAGS.train_epochs, steps_per_sec, eta_seconds / 60,
                           time_elapsed / 60))
            if step % 20 == 0:
                logging.info(message)

        datasets_to_evaluate = {'clean': test_datasets['clean']}
        if (FLAGS.corruptions_interval > 0
                and (epoch + 1) % FLAGS.corruptions_interval == 0):
            datasets_to_evaluate = test_datasets
        for dataset_name, test_dataset in datasets_to_evaluate.items():
            test_iterator = iter(test_dataset)
            logging.info('Testing on dataset %s', dataset_name)
            for step in range(steps_per_eval):
                if step % 20 == 0:
                    logging.info('Starting to run eval step %s of epoch: %s',
                                 step, epoch)
                test_start_time = time.time()
                test_step(test_iterator, dataset_name)
                ms_per_example = (time.time() -
                                  test_start_time) * 1e6 / batch_size
                metrics['test/ms_per_example'].update_state(ms_per_example)

            logging.info('Done with testing on %s', dataset_name)

        corrupt_results = {}
        if (FLAGS.corruptions_interval > 0
                and (epoch + 1) % FLAGS.corruptions_interval == 0):
            corrupt_results = utils.aggregate_corrupt_metrics(
                corrupt_metrics, corruption_types)

        logging.info('Train Loss: %.4f, Accuracy: %.2f%%',
                     metrics['train/loss'].result(),
                     metrics['train/accuracy'].result() * 100)
        logging.info('Test NLL: %.4f, Accuracy: %.2f%%',
                     metrics['test/negative_log_likelihood'].result(),
                     metrics['test/accuracy'].result() * 100)
        for i in range(FLAGS.ensemble_size):
            logging.info(
                'Member %d Test Loss: %.4f, Accuracy: %.2f%%', i,
                metrics['test/nll_member_{}'.format(i)].result(),
                metrics['test/accuracy_member_{}'.format(i)].result() * 100)
        total_results = {
            name: metric.result()
            for name, metric in metrics.items()
        }
        total_results.update(corrupt_results)
        with summary_writer.as_default():
            for name, result in total_results.items():
                tf.summary.scalar(name, result, step=epoch + 1)

        for metric in metrics.values():
            metric.reset_states()

        if (FLAGS.checkpoint_interval > 0
                and (epoch + 1) % FLAGS.checkpoint_interval == 0):
            checkpoint_name = checkpoint.save(
                os.path.join(FLAGS.output_dir, 'checkpoint'))
            logging.info('Saved checkpoint to %s', checkpoint_name)

    final_checkpoint_name = checkpoint.save(
        os.path.join(FLAGS.output_dir, 'checkpoint'))
    logging.info('Saved last checkpoint to %s', final_checkpoint_name)
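
A minimal, self-contained sketch of the numerically stable ensemble negative
log-likelihood used in the test steps above (plain TensorFlow; ensemble_nll is
an illustrative name, not a function defined in these scripts):

import tensorflow as tf

def ensemble_nll(labels, logits):
    """-log((1/M) * sum_m p_m(y|x)), i.e. -logsumexp_m log p_m + log M.

    Args:
      labels: int tensor of shape [batch_size].
      logits: float tensor of shape [M, batch_size, num_classes].
    """
    m = tf.cast(tf.shape(logits)[0], tf.float32)
    labels = tf.broadcast_to(labels, tf.shape(logits)[:2])
    log_liks = -tf.keras.losses.sparse_categorical_crossentropy(
        labels, logits, from_logits=True)  # shape [M, batch_size]
    return tf.reduce_mean(-tf.reduce_logsumexp(log_liks, axis=0) +
                          tf.math.log(m))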
Example #14
def main(argv):
    del argv  # unused arg
    tf.io.gfile.makedirs(FLAGS.output_dir)
    logging.info('Saving checkpoints at %s', FLAGS.output_dir)
    tf.random.set_seed(FLAGS.seed)

    if FLAGS.use_gpu:
        logging.info('Use GPU')
        strategy = tf.distribute.MirroredStrategy()
    else:
        logging.info('Use TPU at %s',
                     FLAGS.tpu if FLAGS.tpu is not None else 'local')
        resolver = tf.distribute.cluster_resolver.TPUClusterResolver(
            tpu=FLAGS.tpu)
        tf.config.experimental_connect_to_cluster(resolver)
        tf.tpu.experimental.initialize_tpu_system(resolver)
        strategy = tf.distribute.experimental.TPUStrategy(resolver)

    train_input_fn = utils.load_input_fn(
        split=tfds.Split.TRAIN,
        name=FLAGS.dataset,
        batch_size=FLAGS.per_core_batch_size // FLAGS.batch_repetitions,
        use_bfloat16=FLAGS.use_bfloat16)
    clean_test_input_fn = utils.load_input_fn(
        split=tfds.Split.TEST,
        name=FLAGS.dataset,
        batch_size=FLAGS.per_core_batch_size,
        use_bfloat16=FLAGS.use_bfloat16)
    train_dataset = strategy.experimental_distribute_datasets_from_function(
        train_input_fn)
    test_datasets = {
        'clean':
        strategy.experimental_distribute_datasets_from_function(
            clean_test_input_fn),
    }
    if FLAGS.corruptions_interval > 0:
        if FLAGS.dataset == 'cifar10':
            load_c_input_fn = utils.load_cifar10_c_input_fn
        else:
            load_c_input_fn = functools.partial(utils.load_cifar100_c_input_fn,
                                                path=FLAGS.cifar100_c_path)
        corruption_types, max_intensity = utils.load_corrupted_test_info(
            FLAGS.dataset)
        for corruption in corruption_types:
            for intensity in range(1, max_intensity + 1):
                input_fn = load_c_input_fn(
                    corruption_name=corruption,
                    corruption_intensity=intensity,
                    batch_size=FLAGS.per_core_batch_size,
                    use_bfloat16=FLAGS.use_bfloat16)
                test_datasets['{0}_{1}'.format(corruption, intensity)] = (
                    strategy.experimental_distribute_datasets_from_function(
                        input_fn))

    ds_info = tfds.builder(FLAGS.dataset).info
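    # The MIMO input pipeline repeats each example batch_repetitions times
    # per step, so train_batch_size below counts distinct examples per step.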
    train_batch_size = (FLAGS.per_core_batch_size * FLAGS.num_cores //
                        FLAGS.batch_repetitions)
    test_batch_size = FLAGS.per_core_batch_size * FLAGS.num_cores
    train_dataset_size = ds_info.splits['train'].num_examples
    steps_per_epoch = train_dataset_size // train_batch_size
    steps_per_eval = ds_info.splits['test'].num_examples // test_batch_size
    num_classes = ds_info.features['label'].num_classes

    if FLAGS.use_bfloat16:
        policy = tf.keras.mixed_precision.experimental.Policy('mixed_bfloat16')
        tf.keras.mixed_precision.experimental.set_policy(policy)

    summary_writer = tf.summary.create_file_writer(
        os.path.join(FLAGS.output_dir, 'summaries'))

    with strategy.scope():
        logging.info('Building Keras model')
        model = ub.models.wide_resnet_mimo(
            input_shape=[FLAGS.ensemble_size] +
            list(ds_info.features['image'].shape),
            depth=28,
            width_multiplier=FLAGS.width_multiplier,
            num_classes=num_classes,
            ensemble_size=FLAGS.ensemble_size)
        logging.info('Model input shape: %s', model.input_shape)
        logging.info('Model output shape: %s', model.output_shape)
        logging.info('Model number of weights: %s', model.count_params())
        # Linearly scale the learning rate and the decay epochs from the
        # vanilla (batch-size-128) settings.
        base_lr = FLAGS.base_learning_rate * train_batch_size / 128
        lr_decay_epochs = [(int(start_epoch_str) * FLAGS.train_epochs) // 200
                           for start_epoch_str in FLAGS.lr_decay_epochs]
        lr_schedule = utils.LearningRateSchedule(steps_per_epoch, base_lr,
                                                 FLAGS.lr_decay_ratio,
                                                 lr_decay_epochs,
                                                 FLAGS.lr_warmup_epochs)
        optimizer = tf.keras.optimizers.SGD(lr_schedule,
                                            momentum=0.9,
                                            nesterov=True)
        metrics = {
            'train/negative_log_likelihood': tf.keras.metrics.Mean(),
            'train/accuracy': tf.keras.metrics.SparseCategoricalAccuracy(),
            'train/loss': tf.keras.metrics.Mean(),
            'train/ece': um.ExpectedCalibrationError(num_bins=FLAGS.num_bins),
            'test/negative_log_likelihood': tf.keras.metrics.Mean(),
            'test/accuracy': tf.keras.metrics.SparseCategoricalAccuracy(),
            'test/ece': um.ExpectedCalibrationError(num_bins=FLAGS.num_bins),
        }
        if FLAGS.corruptions_interval > 0:
            corrupt_metrics = {}
            for intensity in range(1, max_intensity + 1):
                for corruption in corruption_types:
                    dataset_name = '{0}_{1}'.format(corruption, intensity)
                    corrupt_metrics['test/nll_{}'.format(dataset_name)] = (
                        tf.keras.metrics.Mean())
                    corrupt_metrics['test/accuracy_{}'.format(
                        dataset_name)] = (
                            tf.keras.metrics.SparseCategoricalAccuracy())
                    corrupt_metrics['test/ece_{}'.format(dataset_name)] = (
                        um.ExpectedCalibrationError(num_bins=FLAGS.num_bins))

        for i in range(FLAGS.ensemble_size):
            metrics['test/nll_member_{}'.format(i)] = tf.keras.metrics.Mean()
            metrics['test/accuracy_member_{}'.format(i)] = (
                tf.keras.metrics.SparseCategoricalAccuracy())
        test_diversity = {
            'test/disagreement': tf.keras.metrics.Mean(),
            'test/average_kl': tf.keras.metrics.Mean(),
            'test/cosine_similarity': tf.keras.metrics.Mean(),
        }

        checkpoint = tf.train.Checkpoint(model=model, optimizer=optimizer)
        latest_checkpoint = tf.train.latest_checkpoint(FLAGS.output_dir)
        initial_epoch = 0
        if latest_checkpoint:
            # checkpoint.restore must be within a strategy.scope() so that optimizer
            # slot variables are mirrored.
            checkpoint.restore(latest_checkpoint)
            logging.info('Loaded checkpoint %s', latest_checkpoint)
            initial_epoch = optimizer.iterations.numpy() // steps_per_epoch

    @tf.function
    def train_step(iterator):
        """Training StepFn."""
        def step_fn(inputs):
            """Per-Replica StepFn."""
            images, labels = inputs
            batch_size = tf.shape(images)[0]

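            # MIMO input pipeline: each ensemble member gets its own index
            # list over the (batch_repetitions-times tiled) batch; a
            # (1 - input_repetition_probability) fraction of each list is
            # reshuffled independently, the rest is shared across members.
            # Stacking on axis 1 gives images of shape
            # [per_core_batch_size, ensemble_size, height, width, channels].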
            main_shuffle = tf.random.shuffle(
                tf.tile(tf.range(batch_size), [FLAGS.batch_repetitions]))
            to_shuffle = tf.cast(
                tf.cast(tf.shape(main_shuffle)[0], tf.float32) *
                (1. - FLAGS.input_repetition_probability), tf.int32)
            shuffle_indices = [
                tf.concat([
                    tf.random.shuffle(main_shuffle[:to_shuffle]),
                    main_shuffle[to_shuffle:]
                ],
                          axis=0) for _ in range(FLAGS.ensemble_size)
            ]
            images = tf.stack([
                tf.gather(images, indices, axis=0)
                for indices in shuffle_indices
            ],
                              axis=1)
            labels = tf.stack([
                tf.gather(labels, indices, axis=0)
                for indices in shuffle_indices
            ],
                              axis=1)

            with tf.GradientTape() as tape:
                logits = model(images, training=True)
                if FLAGS.use_bfloat16:
                    logits = tf.cast(logits, tf.float32)

                negative_log_likelihood = tf.reduce_mean(
                    tf.reduce_sum(
                        tf.keras.losses.sparse_categorical_crossentropy(
                            labels, logits, from_logits=True),
                        axis=1))
                filtered_variables = []
                for var in model.trainable_variables:
                    # Apply l2 to the kernel, BN, and bias parameters.
                    if ('kernel' in var.name or 'batch_norm' in var.name
                            or 'bias' in var.name):
                        filtered_variables.append(tf.reshape(var, (-1, )))

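                # tf.nn.l2_loss(t) is sum(t ** 2) / 2, so the factor of 2
                # makes this FLAGS.l2 times the sum of squared parameters.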
                l2_loss = FLAGS.l2 * 2 * tf.nn.l2_loss(
                    tf.concat(filtered_variables, axis=0))

                # Scale the loss, since TPUStrategy sum-reduces gradients
                # across all replicas.
                loss = negative_log_likelihood + l2_loss
                scaled_loss = loss / strategy.num_replicas_in_sync

            grads = tape.gradient(scaled_loss, model.trainable_variables)
            optimizer.apply_gradients(zip(grads, model.trainable_variables))

            probs = tf.nn.softmax(tf.reshape(logits, [-1, num_classes]))
            flat_labels = tf.reshape(labels, [-1])
            metrics['train/ece'].update_state(flat_labels, probs)
            metrics['train/loss'].update_state(loss)
            metrics['train/negative_log_likelihood'].update_state(
                negative_log_likelihood)
            metrics['train/accuracy'].update_state(flat_labels, probs)

        strategy.run(step_fn, args=(next(iterator), ))

    @tf.function
    def test_step(iterator, dataset_name):
        """Evaluation StepFn."""
        def step_fn(inputs):
            """Per-Replica StepFn."""
            images, labels = inputs
            images = tf.tile(tf.expand_dims(images, 1),
                             [1, FLAGS.ensemble_size, 1, 1, 1])
            logits = model(images, training=False)
            if FLAGS.use_bfloat16:
                logits = tf.cast(logits, tf.float32)
            probs = tf.nn.softmax(logits)

            if dataset_name == 'clean':
                per_probs = tf.transpose(probs, perm=[1, 0, 2])
                diversity_results = um.average_pairwise_diversity(
                    per_probs, FLAGS.ensemble_size)
                for k, v in diversity_results.items():
                    test_diversity['test/' + k].update_state(v)

            for i in range(FLAGS.ensemble_size):
                member_probs = probs[:, i]
                member_loss = tf.keras.losses.sparse_categorical_crossentropy(
                    labels, member_probs)
                metrics['test/nll_member_{}'.format(i)].update_state(
                    member_loss)
                metrics['test/accuracy_member_{}'.format(i)].update_state(
                    labels, member_probs)

            # Negative log marginal likelihood computed in a numerically-stable way.
            labels_tiled = tf.tile(tf.expand_dims(labels, 1),
                                   [1, FLAGS.ensemble_size])
            log_likelihoods = -tf.keras.losses.sparse_categorical_crossentropy(
                labels_tiled, logits, from_logits=True)
            negative_log_likelihood = tf.reduce_mean(
                -tf.reduce_logsumexp(log_likelihoods, axis=[1]) +
                tf.math.log(float(FLAGS.ensemble_size)))
            probs = tf.math.reduce_mean(probs, axis=1)  # marginalize

            if dataset_name == 'clean':
                metrics['test/negative_log_likelihood'].update_state(
                    negative_log_likelihood)
                metrics['test/accuracy'].update_state(labels, probs)
                metrics['test/ece'].update_state(labels, probs)
            else:
                corrupt_metrics['test/nll_{}'.format(
                    dataset_name)].update_state(negative_log_likelihood)
                corrupt_metrics['test/accuracy_{}'.format(
                    dataset_name)].update_state(labels, probs)
                corrupt_metrics['test/ece_{}'.format(
                    dataset_name)].update_state(labels, probs)

        strategy.run(step_fn, args=(next(iterator), ))

    metrics.update({'test/ms_per_example': tf.keras.metrics.Mean()})

    train_iterator = iter(train_dataset)
    start_time = time.time()
    for epoch in range(initial_epoch, FLAGS.train_epochs):
        logging.info('Starting to run epoch: %s', epoch)

        for step in range(steps_per_epoch):
            train_step(train_iterator)

            current_step = epoch * steps_per_epoch + (step + 1)
            max_steps = steps_per_epoch * FLAGS.train_epochs
            time_elapsed = time.time() - start_time
            steps_per_sec = float(current_step) / time_elapsed
            eta_seconds = (max_steps - current_step) / steps_per_sec
            message = ('{:.1%} completion: epoch {:d}/{:d}. {:.1f} steps/s. '
                       'ETA: {:.0f} min. Time elapsed: {:.0f} min'.format(
                           current_step / max_steps, epoch + 1,
                           FLAGS.train_epochs, steps_per_sec, eta_seconds / 60,
                           time_elapsed / 60))
            if step % 20 == 0:
                logging.info(message)

        datasets_to_evaluate = {'clean': test_datasets['clean']}
        if (FLAGS.corruptions_interval > 0
                and (epoch + 1) % FLAGS.corruptions_interval == 0):
            datasets_to_evaluate = test_datasets
        for dataset_name, test_dataset in datasets_to_evaluate.items():
            test_iterator = iter(test_dataset)
            logging.info('Testing on dataset %s', dataset_name)
            for step in range(steps_per_eval):
                if step % 20 == 0:
                    logging.info('Starting to run eval step %s of epoch: %s',
                                 step, epoch)
                test_start_time = time.time()
                test_step(test_iterator, dataset_name)
                ms_per_example = (time.time() -
                                  test_start_time) * 1e6 / test_batch_size
                metrics['test/ms_per_example'].update_state(ms_per_example)
            logging.info('Done with testing on %s', dataset_name)

        corrupt_results = {}
        if (FLAGS.corruptions_interval > 0
                and (epoch + 1) % FLAGS.corruptions_interval == 0):
            corrupt_results = utils.aggregate_corrupt_metrics(
                corrupt_metrics, corruption_types, max_intensity)

        logging.info('Train Loss: %.4f, Accuracy: %.2f%%',
                     metrics['train/loss'].result(),
                     metrics['train/accuracy'].result() * 100)
        logging.info('Test NLL: %.4f, Accuracy: %.2f%%',
                     metrics['test/negative_log_likelihood'].result(),
                     metrics['test/accuracy'].result() * 100)
        for i in range(FLAGS.ensemble_size):
            logging.info(
                'Member %d Test Loss: %.4f, Accuracy: %.2f%%', i,
                metrics['test/nll_member_{}'.format(i)].result(),
                metrics['test/accuracy_member_{}'.format(i)].result() * 100)

        metrics.update(test_diversity)
        total_results = {
            name: metric.result()
            for name, metric in metrics.items()
        }
        total_results.update(corrupt_results)
        with summary_writer.as_default():
            for name, result in total_results.items():
                tf.summary.scalar(name, result, step=epoch + 1)

        for metric in metrics.values():
            metric.reset_states()

        if (FLAGS.checkpoint_interval > 0
                and (epoch + 1) % FLAGS.checkpoint_interval == 0):
            checkpoint_name = checkpoint.save(
                os.path.join(FLAGS.output_dir, 'checkpoint'))
            logging.info('Saved checkpoint to %s', checkpoint_name)

    final_checkpoint_name = checkpoint.save(
        os.path.join(FLAGS.output_dir, 'checkpoint'))
    logging.info('Saved last checkpoint to %s', final_checkpoint_name)

def main(argv):
    del argv  # unused arg
    tf.enable_v2_behavior()
    tf.io.gfile.makedirs(FLAGS.output_dir)
    logging.info('Saving checkpoints at %s', FLAGS.output_dir)
    tf.random.set_seed(FLAGS.seed)

    if FLAGS.use_gpu:
        logging.info('Use GPU')
        strategy = tf.distribute.MirroredStrategy()
    else:
        logging.info('Use TPU at %s',
                     FLAGS.tpu if FLAGS.tpu is not None else 'local')
        resolver = tf.distribute.cluster_resolver.TPUClusterResolver(
            tpu=FLAGS.tpu)
        tf.config.experimental_connect_to_cluster(resolver)
        tf.tpu.experimental.initialize_tpu_system(resolver)
        strategy = tf.distribute.experimental.TPUStrategy(resolver)

    def train_input_fn(ctx):
        """Sets up local (per-core) dataset batching."""
        dataset = utils.load_distributed_dataset(
            split=tfds.Split.TRAIN,
            name=FLAGS.dataset,
            batch_size=FLAGS.per_core_batch_size,
            use_bfloat16=FLAGS.use_bfloat16)
        if ctx and ctx.num_input_pipelines > 1:
            dataset = dataset.shard(ctx.num_input_pipelines,
                                    ctx.input_pipeline_id)
        return dataset

    def clean_test_input_fn(ctx):
        """Sets up local (per-core) dataset batching for testing."""
        dataset = utils.load_distributed_dataset(
            split=tfds.Split.TEST,
            name=FLAGS.dataset,
            batch_size=FLAGS.per_core_batch_size,
            use_bfloat16=FLAGS.use_bfloat16)
        if ctx and ctx.num_input_pipelines > 1:
            dataset = dataset.shard(ctx.num_input_pipelines,
                                    ctx.input_pipeline_id)
        return dataset

    def corrupt_input_fn(corruption_name, corruption_intensity):
        """Returns an input_fn for the given corruption name and intensity."""
        def test_input_fn(ctx):
            """Sets up local (per-core) corrupted dataset batching."""
            if FLAGS.dataset == 'cifar10':
                load_c_dataset = utils.load_cifar10_c_dataset
            else:
                load_c_dataset = functools.partial(
                    utils.load_cifar100_c_dataset, path=FLAGS.cifar100_c_path)
            dataset = load_c_dataset(corruption_name=corruption_name,
                                     corruption_intensity=corruption_intensity,
                                     batch_size=FLAGS.per_core_batch_size,
                                     use_bfloat16=FLAGS.use_bfloat16)
            if ctx and ctx.num_input_pipelines > 1:
                dataset = dataset.shard(ctx.num_input_pipelines,
                                        ctx.input_pipeline_id)
            return dataset

        return test_input_fn

    test_datasets = {
        'clean':
        strategy.experimental_distribute_datasets_from_function(
            clean_test_input_fn),
    }
    if FLAGS.corruptions_interval > 0:
        corruption_types, max_intensity = utils.load_corrupted_test_info(
            FLAGS.dataset)
        for corruption in corruption_types:
            for intensity in range(1, max_intensity + 1):
                input_fn = corrupt_input_fn(corruption, intensity)
                test_datasets['{0}_{1}'.format(corruption, intensity)] = (
                    strategy.experimental_distribute_datasets_from_function(
                        input_fn))

    train_dataset = strategy.experimental_distribute_datasets_from_function(
        train_input_fn)
    ds_info = tfds.builder(FLAGS.dataset).info
    batch_size = FLAGS.per_core_batch_size * FLAGS.num_cores
    train_dataset_size = ds_info.splits['train'].num_examples
    steps_per_epoch = train_dataset_size // batch_size
    steps_per_eval = ds_info.splits['test'].num_examples // batch_size
    num_classes = ds_info.features['label'].num_classes

    if FLAGS.use_bfloat16:
        policy = tf.keras.mixed_precision.experimental.Policy('mixed_bfloat16')
        tf.keras.mixed_precision.experimental.set_policy(policy)

    summary_writer = tf.summary.create_file_writer(
        os.path.join(FLAGS.output_dir, 'summaries'))

    with strategy.scope():
        logging.info('Building ResNet model')
        model = wide_resnet(input_shape=ds_info.features['image'].shape,
                            depth=28,
                            width_multiplier=10,
                            num_classes=num_classes,
                            prior_stddev=FLAGS.prior_stddev,
                            dataset_size=train_dataset_size,
                            stddev_init=FLAGS.stddev_init)
        logging.info('Model input shape: %s', model.input_shape)
        logging.info('Model output shape: %s', model.output_shape)
        logging.info('Model number of weights: %s', model.count_params())
        # Linearly scale the learning rate and the decay epochs from the
        # vanilla (batch-size-128) settings.
        base_lr = FLAGS.base_learning_rate * batch_size / 128
        lr_decay_epochs = [(start_epoch * FLAGS.train_epochs) // 200
                           for start_epoch in FLAGS.lr_decay_epochs]
        lr_schedule = utils.LearningRateSchedule(
            steps_per_epoch,
            base_lr,
            decay_ratio=FLAGS.lr_decay_ratio,
            decay_epochs=lr_decay_epochs,
            warmup_epochs=FLAGS.lr_warmup_epochs)
        optimizer = tf.keras.optimizers.SGD(lr_schedule,
                                            momentum=0.9,
                                            nesterov=True)
        metrics = {
            'train/negative_log_likelihood':
            tf.keras.metrics.Mean(),
            'train/accuracy':
            tf.keras.metrics.SparseCategoricalAccuracy(),
            'train/loss':
            tf.keras.metrics.Mean(),
            'train/ece':
            ed.metrics.ExpectedCalibrationError(num_classes=num_classes,
                                                num_bins=FLAGS.num_bins),
            'train/kl':
            tf.keras.metrics.Mean(),
            'train/kl_scale':
            tf.keras.metrics.Mean(),
            'test/negative_log_likelihood':
            tf.keras.metrics.Mean(),
            'test/accuracy':
            tf.keras.metrics.SparseCategoricalAccuracy(),
            'test/ece':
            ed.metrics.ExpectedCalibrationError(num_classes=num_classes,
                                                num_bins=FLAGS.num_bins),
        }
        if FLAGS.corruptions_interval > 0:
            corrupt_metrics = {}
            for intensity in range(1, max_intensity + 1):
                for corruption in corruption_types:
                    dataset_name = '{0}_{1}'.format(corruption, intensity)
                    corrupt_metrics['test/nll_{}'.format(dataset_name)] = (
                        tf.keras.metrics.Mean())
                    corrupt_metrics['test/accuracy_{}'.format(
                        dataset_name)] = (
                            tf.keras.metrics.SparseCategoricalAccuracy())
                    corrupt_metrics['test/ece_{}'.format(dataset_name)] = (
                        ed.metrics.ExpectedCalibrationError(
                            num_classes=num_classes, num_bins=FLAGS.num_bins))

        global_step = tf.Variable(
            0,
            trainable=False,
            name='global_step',
            dtype=tf.int64,
            aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA)
        checkpoint = tf.train.Checkpoint(model=model,
                                         optimizer=optimizer,
                                         global_step=global_step)
        latest_checkpoint = tf.train.latest_checkpoint(FLAGS.output_dir)
        initial_epoch = 0
        if latest_checkpoint:
            # checkpoint.restore must be within a strategy.scope() so that optimizer
            # slot variables are mirrored.
            checkpoint.restore(latest_checkpoint)
            logging.info('Loaded checkpoint %s', latest_checkpoint)
            initial_epoch = optimizer.iterations.numpy() // steps_per_epoch

    @tf.function
    def train_step(iterator):
        """Training StepFn."""
        def step_fn(inputs):
            """Per-Replica StepFn."""
            images, labels = inputs
            with tf.GradientTape() as tape:
                logits = model(images, training=True)
                if FLAGS.use_bfloat16:
                    logits = tf.cast(logits, tf.float32)
                negative_log_likelihood = tf.reduce_mean(
                    tf.keras.losses.sparse_categorical_crossentropy(
                        labels, logits, from_logits=True))

                filtered_variables = []
                for var in model.trainable_variables:
                    # Apply l2 to the BN parameters and bias terms. This
                    # excludes only the fast-weight approximate posterior/prior
                    # parameters, but be careful with the naming scheme.
                    if 'batch_norm' in var.name or 'bias' in var.name:
                        filtered_variables.append(tf.reshape(var, (-1, )))

                l2_loss = FLAGS.l2 * 2 * tf.nn.l2_loss(
                    tf.concat(filtered_variables, axis=0))
                kl = sum(model.losses)
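                # Linear KL annealing: ramp the KL weight from ~0 to 1 over
                # the first kl_annealing_epochs epochs, then hold it at 1.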
                kl_scale = tf.cast(global_step + 1, tf.float32)
                kl_scale /= steps_per_epoch * FLAGS.kl_annealing_epochs
                kl_scale = tf.minimum(1., kl_scale)
                kl_loss = kl_scale * kl

                # Scale the loss, since TPUStrategy sum-reduces gradients
                # across all replicas.
                loss = negative_log_likelihood + l2_loss + kl_loss
                scaled_loss = loss / strategy.num_replicas_in_sync

            grads = tape.gradient(scaled_loss, model.trainable_variables)
            optimizer.apply_gradients(zip(grads, model.trainable_variables))

            probs = tf.nn.softmax(logits)
            metrics['train/ece'].update_state(labels, probs)
            metrics['train/loss'].update_state(loss)
            metrics['train/negative_log_likelihood'].update_state(
                negative_log_likelihood)
            metrics['train/kl'].update_state(kl)
            metrics['train/kl_scale'].update_state(kl_scale)
            metrics['train/accuracy'].update_state(labels, logits)

            global_step.assign_add(1)

        strategy.experimental_run_v2(step_fn, args=(next(iterator), ))

    @tf.function
    def test_step(iterator, dataset_name):
        """Evaluation StepFn."""
        def step_fn(inputs):
            """Per-Replica StepFn."""
            images, labels = inputs
            # TODO(trandustin): Use more eval samples only on corrupted predictions;
            # it's expensive but a one-time compute if scheduled post-training.
            if FLAGS.num_eval_samples > 1 and dataset_name != 'clean':
                logits = tf.stack([
                    model(images, training=False)
                    for _ in range(FLAGS.num_eval_samples)
                ],
                                  axis=0)
            else:
                logits = model(images, training=False)
            if FLAGS.use_bfloat16:
                logits = tf.cast(logits, tf.float32)
            probs = tf.nn.softmax(logits)
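            # When several samples are drawn (corrupted sets only), average
            # the per-sample softmax probabilities: a Monte Carlo estimate of
            # the posterior predictive distribution.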
            if FLAGS.num_eval_samples > 1 and dataset_name != 'clean':
                probs = tf.reduce_mean(probs, axis=0)
            negative_log_likelihood = tf.reduce_mean(
                tf.keras.losses.sparse_categorical_crossentropy(labels, probs))

            if dataset_name == 'clean':
                metrics['test/negative_log_likelihood'].update_state(
                    negative_log_likelihood)
                metrics['test/accuracy'].update_state(labels, probs)
                metrics['test/ece'].update_state(labels, probs)
            else:
                corrupt_metrics['test/nll_{}'.format(
                    dataset_name)].update_state(negative_log_likelihood)
                corrupt_metrics['test/accuracy_{}'.format(
                    dataset_name)].update_state(labels, probs)
                corrupt_metrics['test/ece_{}'.format(
                    dataset_name)].update_state(labels, probs)

        strategy.experimental_run_v2(step_fn, args=(next(iterator), ))

    train_iterator = iter(train_dataset)
    start_time = time.time()
    for epoch in range(initial_epoch, FLAGS.train_epochs):
        logging.info('Starting to run epoch: %s', epoch)
        for step in range(steps_per_epoch):
            train_step(train_iterator)

            current_step = epoch * steps_per_epoch + (step + 1)
            max_steps = steps_per_epoch * FLAGS.train_epochs
            time_elapsed = time.time() - start_time
            steps_per_sec = float(current_step) / time_elapsed
            eta_seconds = (max_steps - current_step) / steps_per_sec
            message = ('{:.1%} completion: epoch {:d}/{:d}. {:.1f} steps/s. '
                       'ETA: {:.0f} min. Time elapsed: {:.0f} min'.format(
                           current_step / max_steps, epoch + 1,
                           FLAGS.train_epochs, steps_per_sec, eta_seconds / 60,
                           time_elapsed / 60))
            if step % 20 == 0:
                logging.info(message)

        datasets_to_evaluate = {'clean': test_datasets['clean']}
        if (FLAGS.corruptions_interval > 0
                and (epoch + 1) % FLAGS.corruptions_interval == 0):
            datasets_to_evaluate = test_datasets
        for dataset_name, test_dataset in datasets_to_evaluate.items():
            test_iterator = iter(test_dataset)
            logging.info('Testing on dataset %s', dataset_name)
            for step in range(steps_per_eval):
                if step % 20 == 0:
                    logging.info('Starting to run eval step %s of epoch: %s',
                                 step, epoch)
                test_step(test_iterator, dataset_name)
            logging.info('Done with testing on %s', dataset_name)

        corrupt_results = {}
        if (FLAGS.corruptions_interval > 0
                and (epoch + 1) % FLAGS.corruptions_interval == 0):
            corrupt_results = utils.aggregate_corrupt_metrics(
                corrupt_metrics, corruption_types, max_intensity)

        logging.info('Train Loss: %.4f, Accuracy: %.2f%%',
                     metrics['train/loss'].result(),
                     metrics['train/accuracy'].result() * 100)
        logging.info('Test NLL: %.4f, Accuracy: %.2f%%',
                     metrics['test/negative_log_likelihood'].result(),
                     metrics['test/accuracy'].result() * 100)
        total_results = {
            name: metric.result()
            for name, metric in metrics.items()
        }
        total_results.update(corrupt_results)
        with summary_writer.as_default():
            for name, result in total_results.items():
                tf.summary.scalar(name, result, step=epoch + 1)

        for metric in metrics.values():
            metric.reset_states()

        if (FLAGS.checkpoint_interval > 0
                and (epoch + 1) % FLAGS.checkpoint_interval == 0):
            checkpoint_name = checkpoint.save(
                os.path.join(FLAGS.output_dir, 'checkpoint'))
            logging.info('Saved checkpoint to %s', checkpoint_name)

def main(argv):
  del argv  # unused arg
  tf.io.gfile.makedirs(FLAGS.output_dir)
  logging.info('Saving checkpoints at %s', FLAGS.output_dir)
  tf.random.set_seed(FLAGS.seed)

  per_core_batch_size = FLAGS.per_core_batch_size // FLAGS.ensemble_size
  batch_size = per_core_batch_size * FLAGS.num_cores

  data_dir = FLAGS.data_dir
  if FLAGS.use_gpu:
    logging.info('Use GPU')
    strategy = tf.distribute.MirroredStrategy()
  else:
    logging.info('Use TPU at %s',
                 FLAGS.tpu if FLAGS.tpu is not None else 'local')
    resolver = tf.distribute.cluster_resolver.TPUClusterResolver(tpu=FLAGS.tpu)
    tf.config.experimental_connect_to_cluster(resolver)
    tf.tpu.experimental.initialize_tpu_system(resolver)
    strategy = tf.distribute.TPUStrategy(resolver)

  train_builder = ub.datasets.get(
      FLAGS.dataset,
      data_dir=data_dir,
      download_data=FLAGS.download_data,
      split=tfds.Split.TRAIN,
      validation_percent=1. - FLAGS.train_proportion)
  train_dataset = train_builder.load(batch_size=batch_size)
  train_dataset = strategy.experimental_distribute_dataset(train_dataset)

  validation_dataset = None
  steps_per_validation = 0
  if FLAGS.train_proportion < 1.0:
    validation_builder = ub.datasets.get(
        FLAGS.dataset,
        data_dir=data_dir,
        split=tfds.Split.VALIDATION,
        validation_percent=1. - FLAGS.train_proportion)
    validation_dataset = validation_builder.load(batch_size=batch_size)
    validation_dataset = strategy.experimental_distribute_dataset(
        validation_dataset)
    steps_per_validation = validation_builder.num_examples // batch_size

  clean_test_builder = ub.datasets.get(
      FLAGS.dataset,
      data_dir=data_dir,
      split=tfds.Split.TEST)
  clean_test_dataset = clean_test_builder.load(batch_size=batch_size)
  test_datasets = {
      'clean': strategy.experimental_distribute_dataset(clean_test_dataset),
  }
  steps_per_epoch = train_builder.num_examples // batch_size
  steps_per_eval = clean_test_builder.num_examples // batch_size
  num_classes = 100 if FLAGS.dataset == 'cifar100' else 10
  if FLAGS.corruptions_interval > 0:
    if FLAGS.dataset == 'cifar100':
      data_dir = FLAGS.cifar100_c_path
    corruption_types, _ = utils.load_corrupted_test_info(FLAGS.dataset)
    for corruption_type in corruption_types:
      for severity in range(1, 6):
        dataset = ub.datasets.get(
            f'{FLAGS.dataset}_corrupted',
            corruption_type=corruption_type,
            data_dir=data_dir,
            severity=severity,
            split=tfds.Split.TEST).load(batch_size=batch_size)
        test_datasets[f'{corruption_type}_{severity}'] = (
            strategy.experimental_distribute_dataset(dataset))

  summary_writer = tf.summary.create_file_writer(
      os.path.join(FLAGS.output_dir, 'summaries'))

  with strategy.scope():
    logging.info('Building Keras model')
    model = ub.models.wide_resnet_batchensemble(
        input_shape=(32, 32, 3),
        depth=28,
        width_multiplier=10,
        num_classes=num_classes,
        ensemble_size=FLAGS.ensemble_size,
        random_sign_init=FLAGS.random_sign_init,
        l2=FLAGS.l2)
    logging.info('Model input shape: %s', model.input_shape)
    logging.info('Model output shape: %s', model.output_shape)
    logging.info('Model number of weights: %s', model.count_params())
    # Linearly scale the learning rate and the decay epochs from the
    # vanilla (batch-size-128) settings.
    base_lr = FLAGS.base_learning_rate * batch_size / 128
    lr_decay_epochs = [(int(start_epoch_str) * FLAGS.train_epochs) // 200
                       for start_epoch_str in FLAGS.lr_decay_epochs]
    lr_schedule = ub.schedules.WarmUpPiecewiseConstantSchedule(
        steps_per_epoch,
        base_lr,
        decay_ratio=FLAGS.lr_decay_ratio,
        decay_epochs=lr_decay_epochs,
        warmup_epochs=FLAGS.lr_warmup_epochs)
    optimizer = tf.keras.optimizers.SGD(lr_schedule,
                                        momentum=1.0 - FLAGS.one_minus_momentum,
                                        nesterov=True)
    metrics = {
        'train/negative_log_likelihood': tf.keras.metrics.Mean(),
        'train/accuracy': tf.keras.metrics.SparseCategoricalAccuracy(),
        'train/loss': tf.keras.metrics.Mean(),
        'train/ece': rm.metrics.ExpectedCalibrationError(
            num_bins=FLAGS.num_bins),
        'test/negative_log_likelihood': tf.keras.metrics.Mean(),
        'test/accuracy': tf.keras.metrics.SparseCategoricalAccuracy(),
        'test/ece': rm.metrics.ExpectedCalibrationError(
            num_bins=FLAGS.num_bins),
    }
    eval_dataset_splits = ['test']
    if validation_dataset:
      metrics.update({
          'validation/negative_log_likelihood': tf.keras.metrics.Mean(),
          'validation/accuracy': tf.keras.metrics.SparseCategoricalAccuracy(),
          'validation/ece': rm.metrics.ExpectedCalibrationError(
              num_bins=FLAGS.num_bins),
      })
      eval_dataset_splits += ['validation']
    for i in range(FLAGS.ensemble_size):
      for dataset_split in eval_dataset_splits:
        metrics[f'{dataset_split}/nll_member_{i}'] = tf.keras.metrics.Mean()
        metrics[f'{dataset_split}/accuracy_member_{i}'] = (
            tf.keras.metrics.SparseCategoricalAccuracy())
    if FLAGS.corruptions_interval > 0:
      corrupt_metrics = {}
      for intensity in range(1, 6):
        for corruption in corruption_types:
          dataset_name = '{0}_{1}'.format(corruption, intensity)
          corrupt_metrics['test/nll_{}'.format(dataset_name)] = (
              tf.keras.metrics.Mean())
          corrupt_metrics['test/accuracy_{}'.format(dataset_name)] = (
              tf.keras.metrics.SparseCategoricalAccuracy())
          corrupt_metrics['test/ece_{}'.format(dataset_name)] = (
              rm.metrics.ExpectedCalibrationError(num_bins=FLAGS.num_bins))

    checkpoint = tf.train.Checkpoint(model=model, optimizer=optimizer)
    latest_checkpoint = tf.train.latest_checkpoint(FLAGS.output_dir)
    initial_epoch = 0
    if latest_checkpoint:
      # checkpoint.restore must be within a strategy.scope() so that optimizer
      # slot variables are mirrored.
      checkpoint.restore(latest_checkpoint)
      logging.info('Loaded checkpoint %s', latest_checkpoint)
      initial_epoch = optimizer.iterations.numpy() // steps_per_epoch

  @tf.function
  def train_step(iterator):
    """Training StepFn."""
    def step_fn(inputs):
      """Per-Replica StepFn."""
      images = inputs['features']
      labels = inputs['labels']
      images = tf.tile(images, [FLAGS.ensemble_size, 1, 1, 1])
      labels = tf.tile(labels, [FLAGS.ensemble_size])

      with tf.GradientTape() as tape:
        logits = model(images, training=True)
        negative_log_likelihood = tf.reduce_mean(
            tf.keras.losses.sparse_categorical_crossentropy(labels,
                                                            logits,
                                                            from_logits=True))
        l2_loss = sum(model.losses)
        loss = negative_log_likelihood + l2_loss
        # Scale the loss, since TPUStrategy sum-reduces gradients across
        # all replicas.
        scaled_loss = loss / strategy.num_replicas_in_sync

      grads = tape.gradient(scaled_loss, model.trainable_variables)

      # Separate learning rate implementation.
      if FLAGS.fast_weight_lr_multiplier != 1.0:
        grads_and_vars = []
        for grad, var in zip(grads, model.trainable_variables):
          # Apply a different learning rate to the fast-weight approximate
          # posterior/prior parameters. This excludes BN and slow weights,
          # but be careful with the naming scheme.
          if ('batch_norm' not in var.name and 'kernel' not in var.name):
            grads_and_vars.append((grad * FLAGS.fast_weight_lr_multiplier, var))
          else:
            grads_and_vars.append((grad, var))
        optimizer.apply_gradients(grads_and_vars)
      else:
        optimizer.apply_gradients(zip(grads, model.trainable_variables))

      probs = tf.nn.softmax(logits)
      metrics['train/ece'].add_batch(probs, label=labels)
      metrics['train/loss'].update_state(loss)
      metrics['train/negative_log_likelihood'].update_state(
          negative_log_likelihood)
      metrics['train/accuracy'].update_state(labels, logits)

    for _ in tf.range(tf.cast(steps_per_epoch, tf.int32)):
      strategy.run(step_fn, args=(next(iterator),))

  @tf.function
  def test_step(iterator, dataset_split, dataset_name, num_steps):
    """Evaluation StepFn."""
    def step_fn(inputs):
      """Per-Replica StepFn."""
      images = inputs['features']
      labels = inputs['labels']
      images = tf.tile(images, [FLAGS.ensemble_size, 1, 1, 1])
      logits = model(images, training=False)
      probs = tf.nn.softmax(logits)
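      # Undo the tiling: split into ensemble_size chunks of shape
      # [batch_size, num_classes]; their mean below is the ensemble
      # prediction.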
      per_probs = tf.split(probs,
                           num_or_size_splits=FLAGS.ensemble_size,
                           axis=0)
      for i in range(FLAGS.ensemble_size):
        member_probs = per_probs[i]
        member_loss = tf.keras.losses.sparse_categorical_crossentropy(
            labels, member_probs)
        metrics[f'{dataset_split}/nll_member_{i}'].update_state(member_loss)
        metrics[f'{dataset_split}/accuracy_member_{i}'].update_state(
            labels, member_probs)

      probs = tf.reduce_mean(per_probs, axis=0)
      negative_log_likelihood = tf.reduce_mean(
          tf.keras.losses.sparse_categorical_crossentropy(labels, probs))
      if dataset_name == 'clean':
        metrics[f'{dataset_split}/negative_log_likelihood'].update_state(
            negative_log_likelihood)
        metrics[f'{dataset_split}/accuracy'].update_state(labels, probs)
        metrics[f'{dataset_split}/ece'].add_batch(probs, label=labels)
      else:
        corrupt_metrics['test/nll_{}'.format(dataset_name)].update_state(
            negative_log_likelihood)
        corrupt_metrics['test/accuracy_{}'.format(dataset_name)].update_state(
            labels, probs)
        corrupt_metrics['test/ece_{}'.format(dataset_name)].add_batch(
            probs, label=labels)

    for _ in tf.range(tf.cast(num_steps, tf.int32)):
      strategy.run(step_fn, args=(next(iterator),))

  metrics.update({'test/ms_per_example': tf.keras.metrics.Mean()})

  train_iterator = iter(train_dataset)
  start_time = time.time()
  for epoch in range(initial_epoch, FLAGS.train_epochs):
    logging.info('Starting to run epoch: %s', epoch)
    train_step(train_iterator)

    current_step = (epoch + 1) * steps_per_epoch
    max_steps = steps_per_epoch * FLAGS.train_epochs
    time_elapsed = time.time() - start_time
    steps_per_sec = float(current_step) / time_elapsed
    eta_seconds = (max_steps - current_step) / steps_per_sec
    message = ('{:.1%} completion: epoch {:d}/{:d}. {:.1f} steps/s. '
               'ETA: {:.0f} min. Time elapsed: {:.0f} min'.format(
                   current_step / max_steps,
                   epoch + 1,
                   FLAGS.train_epochs,
                   steps_per_sec,
                   eta_seconds / 60,
                   time_elapsed / 60))
    logging.info(message)

    if validation_dataset:
      validation_iterator = iter(validation_dataset)
      test_step(
          validation_iterator, 'validation', 'clean', steps_per_validation)
    datasets_to_evaluate = {'clean': test_datasets['clean']}
    if (FLAGS.corruptions_interval > 0 and
        (epoch + 1) % FLAGS.corruptions_interval == 0):
      datasets_to_evaluate = test_datasets
    for dataset_name, test_dataset in datasets_to_evaluate.items():
      test_iterator = iter(test_dataset)
      logging.info('Testing on dataset %s', dataset_name)
      logging.info('Starting to run eval at epoch: %s', epoch)
      test_start_time = time.time()
      test_step(test_iterator, 'test', dataset_name, steps_per_eval)
      ms_per_example = (time.time() - test_start_time) * 1e6 / batch_size
      metrics['test/ms_per_example'].update_state(ms_per_example)

      logging.info('Done with testing on %s', dataset_name)

    corrupt_results = {}
    if (FLAGS.corruptions_interval > 0 and
        (epoch + 1) % FLAGS.corruptions_interval == 0):
      corrupt_results = utils.aggregate_corrupt_metrics(corrupt_metrics,
                                                        corruption_types)

    logging.info('Train Loss: %.4f, Accuracy: %.2f%%',
                 metrics['train/loss'].result(),
                 metrics['train/accuracy'].result() * 100)
    logging.info('Test NLL: %.4f, Accuracy: %.2f%%',
                 metrics['test/negative_log_likelihood'].result(),
                 metrics['test/accuracy'].result() * 100)
    for i in range(FLAGS.ensemble_size):
      logging.info('Member %d Test Loss: %.4f, Accuracy: %.2f%%',
                   i, metrics['test/nll_member_{}'.format(i)].result(),
                   metrics['test/accuracy_member_{}'.format(i)].result() * 100)
    total_results = {name: metric.result() for name, metric in metrics.items()}
    total_results.update(corrupt_results)
    # Metrics from Robustness Metrics (like ECE) will return a dict with a
    # single key/value, instead of a scalar.
    total_results = {
        k: (list(v.values())[0] if isinstance(v, dict) else v)
        for k, v in total_results.items()
    }
    with summary_writer.as_default():
      for name, result in total_results.items():
        tf.summary.scalar(name, result, step=epoch + 1)

    for metric in metrics.values():
      metric.reset_states()

    if (FLAGS.checkpoint_interval > 0 and
        (epoch + 1) % FLAGS.checkpoint_interval == 0):
      checkpoint_name = checkpoint.save(
          os.path.join(FLAGS.output_dir, 'checkpoint'))
      logging.info('Saved checkpoint to %s', checkpoint_name)

  final_checkpoint_name = checkpoint.save(
      os.path.join(FLAGS.output_dir, 'checkpoint'))
  logging.info('Saved last checkpoint to %s', final_checkpoint_name)
  with summary_writer.as_default():
    hp.hparams({
        'base_learning_rate': FLAGS.base_learning_rate,
        'one_minus_momentum': FLAGS.one_minus_momentum,
        'l2': FLAGS.l2,
        'random_sign_init': FLAGS.random_sign_init,
        'fast_weight_lr_multiplier': FLAGS.fast_weight_lr_multiplier,
    })
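
As a side note on the training step above: the per-variable learning-rate scaling can be factored out into a small helper. The sketch below is a minimal, self-contained version of the same pattern, assuming a generic optimizer and gradient list; the name-based filter mirrors the one used above and is only illustrative.

import tensorflow as tf

def apply_gradients_with_fast_weight_lr(optimizer, grads, variables,
                                        fast_weight_lr_multiplier):
    """Scales gradients of fast-weight variables before applying them.

    A variable counts as a fast weight here when its name contains neither
    'batch_norm' nor 'kernel', mirroring the filter in the training loop.
    """
    grads_and_vars = []
    for grad, var in zip(grads, variables):
        if 'batch_norm' not in var.name and 'kernel' not in var.name:
            grads_and_vars.append((grad * fast_weight_lr_multiplier, var))
        else:
            grads_and_vars.append((grad, var))
    optimizer.apply_gradients(grads_and_vars)
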
Example #17
def main(argv):
    del argv  # unused arg
    if not FLAGS.use_gpu:
        raise ValueError('Only GPU is currently supported.')
    if FLAGS.num_cores > 1:
        raise ValueError('Only a single accelerator is currently supported.')
    tf.random.set_seed(FLAGS.seed)
    tf.io.gfile.makedirs(FLAGS.output_dir)

    batch_size = FLAGS.per_core_batch_size * FLAGS.num_cores
    steps_per_eval = IMAGENET_VALIDATION_IMAGES // batch_size

    test_builder = ub.datasets.ImageNetDataset(split=tfds.Split.TEST,
                                               use_bfloat16=FLAGS.use_bfloat16,
                                               data_dir=FLAGS.data_dir)
    clean_test_dataset = test_builder.load(batch_size=batch_size)
    test_datasets = {'clean': clean_test_dataset}
    corruption_types, max_intensity = utils.load_corrupted_test_info()
    for name in corruption_types:
        for intensity in range(1, max_intensity + 1):
            dataset_name = '{0}_{1}'.format(name, intensity)
            test_datasets[dataset_name] = utils.load_corrupted_test_dataset(
                corruption_name=name,
                corruption_intensity=intensity,
                batch_size=batch_size,
                drop_remainder=True,
                use_bfloat16=False)

    model = ub.models.resnet50_sngp(
        input_shape=(224, 224, 3),
        batch_size=FLAGS.per_core_batch_size,
        num_classes=NUM_CLASSES,
        use_mc_dropout=FLAGS.use_mc_dropout,
        dropout_rate=FLAGS.dropout_rate,
        filterwise_dropout=FLAGS.filterwise_dropout,
        use_gp_layer=FLAGS.use_gp_layer,
        gp_hidden_dim=FLAGS.gp_hidden_dim,
        gp_scale=FLAGS.gp_scale,
        gp_bias=FLAGS.gp_bias,
        gp_input_normalization=FLAGS.gp_input_normalization,
        gp_random_feature_type=FLAGS.gp_random_feature_type,
        gp_cov_discount_factor=FLAGS.gp_cov_discount_factor,
        gp_cov_ridge_penalty=FLAGS.gp_cov_ridge_penalty,
        gp_output_imagenet_initializer=FLAGS.gp_output_imagenet_initializer,
        use_spec_norm=FLAGS.use_spec_norm,
        spec_norm_iteration=FLAGS.spec_norm_iteration,
        spec_norm_bound=FLAGS.spec_norm_bound)

    logging.info('Model input shape: %s', model.input_shape)
    logging.info('Model output shape: %s', model.output_shape)
    logging.info('Model number of weights: %s', model.count_params())
    # Search for checkpoints from their index file; then remove the index suffix.
    ensemble_filenames = tf.io.gfile.glob(
        os.path.join(FLAGS.checkpoint_dir, '**/*.index'))
    ensemble_filenames = [filename[:-6] for filename in ensemble_filenames]
    ensemble_size = len(ensemble_filenames)
    logging.info('Ensemble size: %s', ensemble_size)
    logging.info('Ensemble number of weights: %s',
                 ensemble_size * model.count_params())
    logging.info('Ensemble filenames: %s', str(ensemble_filenames))
    checkpoint = tf.train.Checkpoint(model=model)

    # Write model predictions to files.
    num_datasets = len(test_datasets)
    for m, ensemble_filename in enumerate(ensemble_filenames):
        checkpoint.restore(ensemble_filename)
        for n, (name, test_dataset) in enumerate(test_datasets.items()):
            filename = '{dataset}_{member}.npy'.format(dataset=name, member=m)
            filename = os.path.join(FLAGS.output_dir, filename)
            if not tf.io.gfile.exists(filename):
                logits = []
                test_iterator = iter(test_dataset)
                for _ in range(steps_per_eval):
                    features, _ = next(test_iterator)  # pytype: disable=attribute-error
                    logits_member, covmat_member = model(features,
                                                         training=False)
                    logits_member = ed.layers.utils.mean_field_logits(
                        logits_member, covmat_member,
                        FLAGS.gp_mean_field_factor_ensemble)
                    logits.append(logits_member)

                logits = tf.concat(logits, axis=0)
                with tf.io.gfile.GFile(filename, 'w') as f:
                    np.save(f, logits.numpy())
            percent = (m * num_datasets +
                       (n + 1)) / (ensemble_size * num_datasets)
            message = (
                '{:.1%} completion for prediction: ensemble member {:d}/{:d}. '
                'Dataset {:d}/{:d}'.format(percent, m + 1, ensemble_size,
                                           n + 1, num_datasets))
            logging.info(message)

    metrics = {
        'test/negative_log_likelihood': tf.keras.metrics.Mean(),
        'test/gibbs_cross_entropy': tf.keras.metrics.Mean(),
        'test/accuracy': tf.keras.metrics.SparseCategoricalAccuracy(),
        'test/ece':
        rm.metrics.ExpectedCalibrationError(num_bins=FLAGS.num_bins),
    }
    corrupt_metrics = {}
    for name in test_datasets:
        corrupt_metrics['test/nll_{}'.format(name)] = tf.keras.metrics.Mean()
        corrupt_metrics['test/accuracy_{}'.format(name)] = (
            tf.keras.metrics.SparseCategoricalAccuracy())
        corrupt_metrics['test/ece_{}'.format(
            name)] = rm.metrics.ExpectedCalibrationError(
                num_bins=FLAGS.num_bins)

    # Evaluate model predictions.
    for n, (name, test_dataset) in enumerate(test_datasets.items()):
        logits_dataset = []
        for m in range(ensemble_size):
            filename = '{dataset}_{member}.npy'.format(dataset=name, member=m)
            filename = os.path.join(FLAGS.output_dir, filename)
            with tf.io.gfile.GFile(filename, 'rb') as f:
                logits_dataset.append(np.load(f))

        logits_dataset = tf.convert_to_tensor(logits_dataset)
        test_iterator = iter(test_dataset)
        for step in range(steps_per_eval):
            _, labels = next(test_iterator)  # pytype: disable=attribute-error
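            # Slice this step's batch out of the stacked per-member logits,
            # which have shape [ensemble_size, num_examples, num_classes].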
            logits = logits_dataset[:, (step * batch_size):((step + 1) *
                                                            batch_size)]
            labels = tf.cast(tf.reshape(labels, [-1]), tf.int32)
            negative_log_likelihood_metric = rm.metrics.EnsembleCrossEntropy()
            negative_log_likelihood_metric.add_batch(logits, labels=labels)
            negative_log_likelihood = list(
                negative_log_likelihood_metric.result().values())[0]
            per_probs = tf.nn.softmax(logits)
            probs = tf.reduce_mean(per_probs, axis=0)
            if name == 'clean':
                gibbs_ce_metric = rm.metrics.GibbsCrossEntropy()
                gibbs_ce_metric.add_batch(logits, labels=labels)
                gibbs_ce = list(gibbs_ce_metric.result().values())[0]
                metrics['test/negative_log_likelihood'].update_state(
                    negative_log_likelihood)
                metrics['test/gibbs_cross_entropy'].update_state(gibbs_ce)
                metrics['test/accuracy'].update_state(labels, probs)
                metrics['test/ece'].add_batch(probs, label=labels)
            else:
                corrupt_metrics['test/nll_{}'.format(name)].update_state(
                    negative_log_likelihood)
                corrupt_metrics['test/accuracy_{}'.format(name)].update_state(
                    labels, probs)
                corrupt_metrics['test/ece_{}'.format(name)].add_batch(
                    probs, label=labels)

        message = (
            '{:.1%} completion for evaluation: dataset {:d}/{:d}'.format(
                (n + 1) / num_datasets, n + 1, num_datasets))
        logging.info(message)

    corrupt_results = utils.aggregate_corrupt_metrics(
        corrupt_metrics, corruption_types, max_intensity,
        FLAGS.alexnet_errors_path)
    total_results = {name: metric.result() for name, metric in metrics.items()}
    # Metrics from Robustness Metrics (like ECE) will return a dict with a
    # single key/value, instead of a scalar.
    total_results.update(corrupt_results)
    total_results = {
        k: (list(v.values())[0] if isinstance(v, dict) else v)
        for k, v in total_results.items()
    }
    logging.info('Metrics: %s', total_results)
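
For context, `ed.layers.utils.mean_field_logits` above implements the mean-field approximation to the GP posterior predictive: each example's logits are shrunk by 1/sqrt(1 + lambda * variance), with the variance read off the diagonal of the returned covariance. A minimal sketch of that adjustment follows; the actual edward2 implementation may differ in details such as the handling of non-positive mean-field factors.

import tensorflow as tf

def mean_field_logits(logits, covmat, mean_field_factor):
    # Per-example predictive variance from the covariance diagonal.
    variances = tf.linalg.diag_part(covmat)
    # Shrink each example's logits by 1 / sqrt(1 + lambda * variance).
    logits_scale = tf.sqrt(1. + variances * mean_field_factor)
    return logits / tf.expand_dims(logits_scale, axis=-1)
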
Example #18
def main(argv):
    del argv  # unused arg
    tf.io.gfile.makedirs(FLAGS.output_dir)
    logging.info('Saving checkpoints at %s', FLAGS.output_dir)
    tf.random.set_seed(FLAGS.seed)

    if FLAGS.use_gpu:
        logging.info('Use GPU')
        strategy = tf.distribute.MirroredStrategy()
    else:
        logging.info('Use TPU at %s',
                     FLAGS.tpu if FLAGS.tpu is not None else 'local')
        resolver = tf.distribute.cluster_resolver.TPUClusterResolver(
            tpu=FLAGS.tpu)
        tf.config.experimental_connect_to_cluster(resolver)
        tf.tpu.experimental.initialize_tpu_system(resolver)
        strategy = tf.distribute.TPUStrategy(resolver)

    ds_info = tfds.builder(FLAGS.dataset).info
    batch_size = FLAGS.per_core_batch_size * FLAGS.num_cores
    train_dataset_size = ds_info.splits['train'].num_examples
    steps_per_epoch = train_dataset_size // batch_size
    steps_per_eval = ds_info.splits['test'].num_examples // batch_size
    num_classes = ds_info.features['label'].num_classes

    train_dataset = ub.datasets.get(
        FLAGS.dataset, split=tfds.Split.TRAIN).load(batch_size=batch_size)
    clean_test_dataset = ub.datasets.get(
        FLAGS.dataset, split=tfds.Split.TEST).load(batch_size=batch_size)
    train_dataset = strategy.experimental_distribute_dataset(train_dataset)
    test_datasets = {
        'clean': strategy.experimental_distribute_dataset(clean_test_dataset),
    }
    if FLAGS.corruptions_interval > 0:
        extra_kwargs = {}
        if FLAGS.dataset == 'cifar100':
            extra_kwargs['data_dir'] = FLAGS.cifar100_c_path
        corruption_types, _ = utils.load_corrupted_test_info(FLAGS.dataset)
        for corruption_type in corruption_types:
            for severity in range(1, 6):
                dataset = ub.datasets.get(
                    f'{FLAGS.dataset}_corrupted',
                    corruption_type=corruption_type,
                    severity=severity,
                    split=tfds.Split.TEST,
                    **extra_kwargs).load(batch_size=batch_size)
                test_datasets[f'{corruption_type}_{severity}'] = (
                    strategy.experimental_distribute_dataset(dataset))

    summary_writer = tf.summary.create_file_writer(
        os.path.join(FLAGS.output_dir, 'summaries'))

    with strategy.scope():
        logging.info('Building ResNet model')
        model = ub.models.wide_resnet_variational(
            input_shape=ds_info.features['image'].shape,
            depth=28,
            width_multiplier=10,
            num_classes=num_classes,
            prior_stddev=FLAGS.prior_stddev,
            dataset_size=train_dataset_size,
            stddev_init=FLAGS.stddev_init)
        logging.info('Model input shape: %s', model.input_shape)
        logging.info('Model output shape: %s', model.output_shape)
        logging.info('Model number of weights: %s', model.count_params())
        # Linearly scale learning rate and the decay epochs by vanilla settings.
        base_lr = FLAGS.base_learning_rate * batch_size / 128
        lr_decay_epochs = [(int(start_epoch_str) * FLAGS.train_epochs) // 200
                           for start_epoch_str in FLAGS.lr_decay_epochs]
        lr_schedule = ub.schedules.WarmUpPiecewiseConstantSchedule(
            steps_per_epoch,
            base_lr,
            decay_ratio=FLAGS.lr_decay_ratio,
            decay_epochs=lr_decay_epochs,
            warmup_epochs=FLAGS.lr_warmup_epochs)
        optimizer = tf.keras.optimizers.SGD(lr_schedule,
                                            momentum=1.0 -
                                            FLAGS.one_minus_momentum,
                                            nesterov=True)
        metrics = {
            'train/negative_log_likelihood':
            tf.keras.metrics.Mean(),
            'train/accuracy':
            tf.keras.metrics.SparseCategoricalAccuracy(),
            'train/loss':
            tf.keras.metrics.Mean(),
            'train/ece':
            rm.metrics.ExpectedCalibrationError(num_bins=FLAGS.num_bins),
            'train/kl':
            tf.keras.metrics.Mean(),
            'train/kl_scale':
            tf.keras.metrics.Mean(),
            'test/negative_log_likelihood':
            tf.keras.metrics.Mean(),
            'test/accuracy':
            tf.keras.metrics.SparseCategoricalAccuracy(),
            'test/ece':
            rm.metrics.ExpectedCalibrationError(num_bins=FLAGS.num_bins),
        }
        if FLAGS.corruptions_interval > 0:
            corrupt_metrics = {}
            for intensity in range(1, 6):
                for corruption in corruption_types:
                    dataset_name = '{0}_{1}'.format(corruption, intensity)
                    corrupt_metrics['test/nll_{}'.format(dataset_name)] = (
                        tf.keras.metrics.Mean())
                    corrupt_metrics['test/accuracy_{}'.format(
                        dataset_name)] = (
                            tf.keras.metrics.SparseCategoricalAccuracy())
                    corrupt_metrics['test/ece_{}'.format(dataset_name)] = (
                        rm.metrics.ExpectedCalibrationError(
                            num_bins=FLAGS.num_bins))

        checkpoint = tf.train.Checkpoint(model=model, optimizer=optimizer)
        latest_checkpoint = tf.train.latest_checkpoint(FLAGS.output_dir)
        initial_epoch = 0
        if latest_checkpoint:
            # checkpoint.restore must be within a strategy.scope() so that optimizer
            # slot variables are mirrored.
            checkpoint.restore(latest_checkpoint)
            logging.info('Loaded checkpoint %s', latest_checkpoint)
            initial_epoch = optimizer.iterations.numpy() // steps_per_epoch

    @tf.function
    def train_step(iterator):
        """Training StepFn."""
        def step_fn(inputs):
            """Per-Replica StepFn."""
            images = inputs['features']
            labels = inputs['labels']
            with tf.GradientTape() as tape:
                logits = model(images, training=True)
                negative_log_likelihood = tf.reduce_mean(
                    tf.keras.losses.sparse_categorical_crossentropy(
                        labels, logits, from_logits=True))

                filtered_variables = []
                for var in model.trainable_variables:
                    # Apply l2 only to the BN parameters and bias terms. This
                    # excludes the fast-weight approximate posterior/prior
                    # parameters; pay attention to their naming scheme.
                    if 'batch_norm' in var.name or 'bias' in var.name:
                        filtered_variables.append(tf.reshape(var, (-1, )))

                l2_loss = FLAGS.l2 * 2 * tf.nn.l2_loss(
                    tf.concat(filtered_variables, axis=0))
                kl = sum(model.losses)
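                # Anneal the KL term linearly from ~0 to 1 over
                # `kl_annealing_epochs` epochs (KL warm-up), so the likelihood
                # term dominates the loss early in training.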
                kl_scale = tf.cast(optimizer.iterations + 1, kl.dtype)
                kl_scale /= steps_per_epoch * FLAGS.kl_annealing_epochs
                kl_scale = tf.minimum(1., kl_scale)
                kl_loss = kl_scale * kl

                # Scale the loss given the TPUStrategy will reduce sum all gradients.
                loss = negative_log_likelihood + l2_loss + kl_loss
                scaled_loss = loss / strategy.num_replicas_in_sync

            grads = tape.gradient(scaled_loss, model.trainable_variables)
            optimizer.apply_gradients(zip(grads, model.trainable_variables))

            probs = tf.nn.softmax(logits)
            metrics['train/ece'].add_batch(probs, label=labels)
            metrics['train/loss'].update_state(loss)
            metrics['train/negative_log_likelihood'].update_state(
                negative_log_likelihood)
            metrics['train/kl'].update_state(kl)
            metrics['train/kl_scale'].update_state(kl_scale)
            metrics['train/accuracy'].update_state(labels, logits)

        for _ in tf.range(tf.cast(steps_per_epoch, tf.int32)):
            strategy.run(step_fn, args=(next(iterator), ))

    @tf.function
    def test_step(iterator, dataset_name):
        """Evaluation StepFn."""
        def step_fn(inputs):
            """Per-Replica StepFn."""
            images = inputs['features']
            labels = inputs['labels']
            # TODO(trandustin): Use more eval samples only on corrupted predictions;
            # it's expensive but a one-time compute if scheduled post-training.
            if FLAGS.num_eval_samples > 1 and dataset_name != 'clean':
                logits = tf.stack([
                    model(images, training=False)
                    for _ in range(FLAGS.num_eval_samples)
                ],
                                  axis=0)
            else:
                logits = model(images, training=False)
            probs = tf.nn.softmax(logits)
            if FLAGS.num_eval_samples > 1 and dataset_name != 'clean':
                probs = tf.reduce_mean(probs, axis=0)
            negative_log_likelihood = tf.reduce_mean(
                tf.keras.losses.sparse_categorical_crossentropy(labels, probs))

            if dataset_name == 'clean':
                metrics['test/negative_log_likelihood'].update_state(
                    negative_log_likelihood)
                metrics['test/accuracy'].update_state(labels, probs)
                metrics['test/ece'].add_batch(probs, label=labels)
            else:
                corrupt_metrics['test/nll_{}'.format(
                    dataset_name)].update_state(negative_log_likelihood)
                corrupt_metrics['test/accuracy_{}'.format(
                    dataset_name)].update_state(labels, probs)
                corrupt_metrics['test/ece_{}'.format(dataset_name)].add_batch(
                    probs, label=labels)

        for _ in tf.range(tf.cast(steps_per_eval, tf.int32)):
            strategy.run(step_fn, args=(next(iterator), ))

    metrics.update({'test/ms_per_example': tf.keras.metrics.Mean()})

    train_iterator = iter(train_dataset)
    start_time = time.time()
    for epoch in range(initial_epoch, FLAGS.train_epochs):
        logging.info('Starting to run epoch: %s', epoch)
        train_step(train_iterator)

        current_step = (epoch + 1) * steps_per_epoch
        max_steps = steps_per_epoch * FLAGS.train_epochs
        time_elapsed = time.time() - start_time
        steps_per_sec = float(current_step) / time_elapsed
        eta_seconds = (max_steps - current_step) / steps_per_sec
        message = ('{:.1%} completion: epoch {:d}/{:d}. {:.1f} steps/s. '
                   'ETA: {:.0f} min. Time elapsed: {:.0f} min'.format(
                       current_step / max_steps, epoch + 1, FLAGS.train_epochs,
                       steps_per_sec, eta_seconds / 60, time_elapsed / 60))
        logging.info(message)

        datasets_to_evaluate = {'clean': test_datasets['clean']}
        if (FLAGS.corruptions_interval > 0
                and (epoch + 1) % FLAGS.corruptions_interval == 0):
            datasets_to_evaluate = test_datasets
        for dataset_name, test_dataset in datasets_to_evaluate.items():
            test_iterator = iter(test_dataset)
            logging.info('Testing on dataset %s', dataset_name)
            logging.info('Starting to run eval at epoch: %s', epoch)
            test_start_time = time.time()
            test_step(test_iterator, dataset_name)
            ms_per_example = (time.time() - test_start_time) * 1e6 / batch_size
            metrics['test/ms_per_example'].update_state(ms_per_example)

            logging.info('Done with testing on %s', dataset_name)

        corrupt_results = {}
        if (FLAGS.corruptions_interval > 0
                and (epoch + 1) % FLAGS.corruptions_interval == 0):
            corrupt_results = utils.aggregate_corrupt_metrics(
                corrupt_metrics, corruption_types)

        logging.info('Train Loss: %.4f, Accuracy: %.2f%%',
                     metrics['train/loss'].result(),
                     metrics['train/accuracy'].result() * 100)
        logging.info('Test NLL: %.4f, Accuracy: %.2f%%',
                     metrics['test/negative_log_likelihood'].result(),
                     metrics['test/accuracy'].result() * 100)
        total_results = {
            name: metric.result()
            for name, metric in metrics.items()
        }
        total_results.update(corrupt_results)
        # Metrics from Robustness Metrics (like ECE) will return a dict with a
        # single key/value, instead of a scalar.
        total_results = {
            k: (list(v.values())[0] if isinstance(v, dict) else v)
            for k, v in total_results.items()
        }
        with summary_writer.as_default():
            for name, result in total_results.items():
                tf.summary.scalar(name, result, step=epoch + 1)

        for metric in metrics.values():
            metric.reset_states()

        if (FLAGS.checkpoint_interval > 0
                and (epoch + 1) % FLAGS.checkpoint_interval == 0):
            checkpoint_name = checkpoint.save(
                os.path.join(FLAGS.output_dir, 'checkpoint'))
            logging.info('Saved checkpoint to %s', checkpoint_name)

    final_checkpoint_name = checkpoint.save(
        os.path.join(FLAGS.output_dir, 'checkpoint'))
    logging.info('Saved last checkpoint to %s', final_checkpoint_name)
    with summary_writer.as_default():
        hp.hparams({
            'base_learning_rate': FLAGS.base_learning_rate,
            'one_minus_momentum': FLAGS.one_minus_momentum,
            'l2': FLAGS.l2,
            'prior_stddev': FLAGS.prior_stddev,
            'stddev_init': FLAGS.stddev_init,
        })
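
One detail worth calling out from the evaluation step above: on corrupted data it averages probabilities, not logits, across `FLAGS.num_eval_samples` stochastic forward passes. A standalone sketch of that Monte Carlo averaging, assuming a model that is stochastic at inference time:

import tensorflow as tf

def mc_predict_probs(model, images, num_samples):
    # Logits from independent stochastic forward passes:
    # shape [num_samples, batch, num_classes].
    logits = tf.stack(
        [model(images, training=False) for _ in range(num_samples)], axis=0)
    # Average in probability space; averaging logits would not give the
    # posterior predictive mean.
    return tf.reduce_mean(tf.nn.softmax(logits, axis=-1), axis=0)
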
Example #19
def main(argv):
    del argv  # unused arg
    if FLAGS.num_cores > 1:
        raise ValueError('Only a single accelerator is currently supported.')
    tf.enable_v2_behavior()
    tf.random.set_seed(FLAGS.seed)

    dataset_input_fn = utils.load_input_fn(tfds.Split.TEST,
                                           FLAGS.per_core_batch_size,
                                           name=FLAGS.dataset,
                                           use_bfloat16=False,
                                           normalize=True,
                                           drop_remainder=True,
                                           proportion=1.0)
    test_datasets = {'clean': dataset_input_fn()}

    ds_info = tfds.builder(FLAGS.dataset).info
    num_classes = ds_info.features['label'].num_classes
    model = deterministic.wide_resnet(
        input_shape=ds_info.features['image'].shape,
        depth=28,
        width_multiplier=10,
        num_classes=num_classes,
        l2=0.,
        version=2)
    logging.info('Model input shape: %s', model.input_shape)
    logging.info('Model output shape: %s', model.output_shape)
    logging.info('Model number of weights: %s', model.count_params())

    # Search for checkpoints from their index file; then remove the index suffix.
    ensemble_filenames = tf.io.gfile.glob(
        os.path.join(FLAGS.output_dir, '**/*.index'))
    ensemble_filenames = [filename[:-6] for filename in ensemble_filenames]
    ensemble_size = len(ensemble_filenames)
    logging.info('Ensemble size: %s', ensemble_size)
    logging.info('Ensemble number of weights: %s',
                 ensemble_size * model.count_params())
    logging.info('Ensemble filenames: %s', str(ensemble_filenames))
    checkpoint = tf.train.Checkpoint(model=model)

    # Collect the logits output for each ensemble member and train/test data
    # point. We also collect the labels.
    # TODO(trandustin): Refactor data loader so you can get the full dataset in
    # memory without looping.
    logits_test = {'clean': []}
    labels_test = {'clean': []}
    corruption_types, max_intensity = utils.load_corrupted_test_info(
        FLAGS.dataset)
    for name in corruption_types:
        for intensity in range(1, max_intensity + 1):
            dataset_name = '{0}_{1}'.format(name, intensity)
            logits_test[dataset_name] = []
            labels_test[dataset_name] = []

            if FLAGS.dataset == 'cifar10':
                load_c_dataset = utils.load_cifar10_c_input_fn
            else:
                load_c_dataset = functools.partial(
                    utils.load_cifar100_c_input_fn, path=FLAGS.cifar100_c_path)
            corrupted_input_fn = load_c_dataset(
                corruption_name=name,
                corruption_intensity=intensity,
                batch_size=FLAGS.per_core_batch_size,
                use_bfloat16=False)
            test_datasets[dataset_name] = corrupted_input_fn()

    for m, ensemble_filename in enumerate(ensemble_filenames):
        checkpoint.restore(ensemble_filename)
        logging.info('Working on test data for ensemble member %s', m)
        for name, test_dataset in test_datasets.items():
            logits = []
            for features, labels in test_dataset:
                logits.append(model(features, training=False))
                if m == 0:
                    labels_test[name].append(labels)

            logits = tf.concat(logits, axis=0)
            logits_test[name].append(logits)
            if m == 0:
                labels_test[name] = tf.concat(labels_test[name], axis=0)
            logging.info('Finished testing on %s', name)

    metrics = {
        'test/ece':
        ed.metrics.ExpectedCalibrationError(num_classes=num_classes,
                                            num_bins=15)
    }
    corrupt_metrics = {}
    for name in test_datasets:
        corrupt_metrics['test/ece_{}'.format(
            name)] = ed.metrics.ExpectedCalibrationError(
                num_classes=num_classes, num_bins=15)
        corrupt_metrics['test/nll_{}'.format(name)] = tf.keras.metrics.Mean()
        corrupt_metrics['test/accuracy_{}'.format(
            name)] = tf.keras.metrics.Mean()

    for name, test_dataset in test_datasets.items():
        labels = labels_test[name]
        logits = logits_test[name]
        nll_test = ensemble_negative_log_likelihood(labels, logits)
        gibbs_ce_test = gibbs_cross_entropy(labels_test[name],
                                            logits_test[name])
        labels = tf.cast(labels, tf.int32)
        logits = tf.convert_to_tensor(logits)
        per_probs = tf.nn.softmax(logits)
        probs = tf.reduce_mean(per_probs, axis=0)
        accuracy = tf.keras.metrics.sparse_categorical_accuracy(labels, probs)
        if name == 'clean':
            metrics['test/negative_log_likelihood'] = tf.reduce_mean(nll_test)
            metrics['test/gibbs_cross_entropy'] = tf.reduce_mean(gibbs_ce_test)
            metrics['test/accuracy'] = tf.reduce_mean(accuracy)
            metrics['test/ece'].update_state(labels, probs)
        else:
            corrupt_metrics['test/nll_{}'.format(name)].update_state(
                tf.reduce_mean(nll_test))
            corrupt_metrics['test/accuracy_{}'.format(name)].update_state(
                tf.reduce_mean(accuracy))
            corrupt_metrics['test/ece_{}'.format(name)].update_state(
                labels, probs)

    corrupt_results = {}
    corrupt_results = utils.aggregate_corrupt_metrics(corrupt_metrics,
                                                      corruption_types,
                                                      max_intensity)
    metrics['test/ece'] = metrics['test/ece'].result()
    total_results = dict(metrics)
    total_results.update(corrupt_results)
    logging.info('Metrics: %s', total_results)
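
`ensemble_negative_log_likelihood` and `gibbs_cross_entropy` used above are defined elsewhere in the original script. A minimal sketch of the usual definitions, assuming `logits` is stacked per member with shape [ensemble_size, batch, num_classes] (the original helpers may differ in signature):

import tensorflow as tf

def ensemble_negative_log_likelihood(labels, logits):
    """-log((1/M) * sum_m p_m(y|x)) per example, via log-mean-exp."""
    logits = tf.convert_to_tensor(logits)
    labels = tf.cast(labels, tf.int32)
    m = tf.cast(tf.shape(logits)[0], logits.dtype)
    # Broadcast labels to [ensemble_size, batch] and get per-member NLLs.
    labels_b = tf.broadcast_to(labels, tf.shape(logits)[:-1])
    ce = tf.nn.sparse_softmax_cross_entropy_with_logits(
        labels=labels_b, logits=logits)
    return -(tf.reduce_logsumexp(-ce, axis=0) - tf.math.log(m))

def gibbs_cross_entropy(labels, logits):
    """Average of the per-member cross-entropies, per example."""
    logits = tf.convert_to_tensor(logits)
    labels = tf.cast(labels, tf.int32)
    labels_b = tf.broadcast_to(labels, tf.shape(logits)[:-1])
    ce = tf.nn.sparse_softmax_cross_entropy_with_logits(
        labels=labels_b, logits=logits)
    return tf.reduce_mean(ce, axis=0)
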
Example #20
def main(argv):
    del argv  # unused arg
    tf.io.gfile.makedirs(FLAGS.output_dir)
    logging.info('Saving checkpoints at %s', FLAGS.output_dir)
    tf.random.set_seed(FLAGS.seed)

    if FLAGS.use_gpu:
        logging.info('Use GPU')
        strategy = tf.distribute.MirroredStrategy()
    else:
        logging.info('Use TPU at %s',
                     FLAGS.tpu if FLAGS.tpu is not None else 'local')
        resolver = tf.distribute.cluster_resolver.TPUClusterResolver(
            tpu=FLAGS.tpu)
        tf.config.experimental_connect_to_cluster(resolver)
        tf.tpu.experimental.initialize_tpu_system(resolver)
        strategy = tf.distribute.TPUStrategy(resolver)

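    # The input batch is tiled `ensemble_size` times inside the step
    # functions, so divide the per-core batch size up front to keep the
    # effective batch size constant.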
    per_core_batch_size = FLAGS.per_core_batch_size // FLAGS.ensemble_size
    batch_size = per_core_batch_size * FLAGS.num_cores
    check_bool = 0 < FLAGS.train_proportion <= 1
    assert check_bool, 'Train proportion must satisfy 0 < proportion <= 1.'

    drop_remainder_validation = True
    if not FLAGS.use_gpu:
        # This has to be True for TPU training; otherwise the batch size of
        # images in the validation set can't be determined at TPU compile time.
        assert drop_remainder_validation, 'drop_remainder must be True in TPU mode.'

    validation_percent = 1 - FLAGS.train_proportion
    train_dataset = ub.datasets.get(
        FLAGS.dataset,
        split=tfds.Split.TRAIN,
        validation_percent=validation_percent).load(batch_size=batch_size)
    validation_dataset = ub.datasets.get(
        FLAGS.dataset,
        split=tfds.Split.VALIDATION,
        validation_percent=validation_percent,
        drop_remainder=drop_remainder_validation).load(batch_size=batch_size)
    validation_dataset = validation_dataset.repeat()
    clean_test_dataset = ub.datasets.get(
        FLAGS.dataset, split=tfds.Split.TEST).load(batch_size=batch_size)
    train_dataset = strategy.experimental_distribute_dataset(train_dataset)
    validation_dataset = strategy.experimental_distribute_dataset(
        validation_dataset)
    test_datasets = {
        'clean': strategy.experimental_distribute_dataset(clean_test_dataset),
    }
    if FLAGS.corruptions_interval > 0:
        extra_kwargs = {}
        if FLAGS.dataset == 'cifar100':
            extra_kwargs['data_dir'] = FLAGS.cifar100_c_path
        corruption_types, _ = utils.load_corrupted_test_info(FLAGS.dataset)
        for corruption_type in corruption_types:
            for severity in range(1, 6):
                dataset = ub.datasets.get(
                    f'{FLAGS.dataset}_corrupted',
                    corruption_type=corruption_type,
                    severity=severity,
                    split=tfds.Split.TEST,
                    **extra_kwargs).load(batch_size=batch_size)
                test_datasets[f'{corruption_type}_{severity}'] = (
                    strategy.experimental_distribute_dataset(dataset))

    ds_info = tfds.builder(FLAGS.dataset).info
    train_sample_size = ds_info.splits[
        'train'].num_examples * FLAGS.train_proportion
    steps_per_epoch = int(train_sample_size / batch_size)
    train_sample_size = int(train_sample_size)

    steps_per_eval = ds_info.splits['test'].num_examples // batch_size
    num_classes = ds_info.features['label'].num_classes

    summary_writer = tf.summary.create_file_writer(
        os.path.join(FLAGS.output_dir, 'summaries'))

    logging.info('Building Keras model.')
    depth = 28
    width = 10

    dict_ranges = {'min': FLAGS.min_l2_range, 'max': FLAGS.max_l2_range}
    ranges = [dict_ranges for _ in range(6)]  # 6 independent l2 parameters
    model_config = {
        'key_to_index': {
            'input_conv_l2_kernel': 0,
            'group_l2_kernel': 1,
            'group_1_l2_kernel': 2,
            'group_2_l2_kernel': 3,
            'dense_l2_kernel': 4,
            'dense_l2_bias': 5,
        },
        'ranges': ranges,
        'test': None
    }
    lambdas_config = LambdaConfig(model_config['ranges'],
                                  model_config['key_to_index'])

    if FLAGS.e_body_hidden_units > 0:
        e_body_arch = '({},)'.format(FLAGS.e_body_hidden_units)
    else:
        e_body_arch = '()'
    e_shared_arch = '()'
    e_activation = 'tanh'
    filters_resnet = [16]
    for i in range(0, 3):  # 3 groups of blocks
        filters_resnet.extend([16 * width * 2**i] *
                              9)  # 9 layers in each block
    # The e_head dim for each conv2d layer is just its number of filters
    # (kernel only); for the final dense layer it is twice the number of
    # classes (kernel + bias).
    e_head_dims = [x for x in filters_resnet] + [2 * num_classes]

    with strategy.scope():
        e_models = e_factory(
            lambdas_config.input_shape,
            e_head_dims=e_head_dims,
            e_body_arch=eval(e_body_arch),  # pylint: disable=eval-used
            e_shared_arch=eval(e_shared_arch),  # pylint: disable=eval-used
            activation=e_activation,
            use_bias=FLAGS.e_model_use_bias,
            e_head_init=FLAGS.init_emodels_stddev)

        model = wide_resnet_hyperbatchensemble(
            input_shape=ds_info.features['image'].shape,
            depth=depth,
            width_multiplier=width,
            num_classes=num_classes,
            ensemble_size=FLAGS.ensemble_size,
            random_sign_init=FLAGS.random_sign_init,
            config=lambdas_config,
            e_models=e_models,
            l2_batchnorm_layer=FLAGS.l2_batchnorm,
            regularize_fast_weights=FLAGS.regularize_fast_weights,
            fast_weights_eq_contraint=FLAGS.fast_weights_eq_contraint,
            version=2)

        logging.info('Model input shape: %s', model.input_shape)
        logging.info('Model output shape: %s', model.output_shape)
        logging.info('Model number of weights: %s', model.count_params())
        # build hyper-batchensemble complete -------------------------

        # Initialize Lambda distributions for tuning
        lambdas_mean = tf.reduce_mean(
            log_uniform_mean([lambdas_config.log_min, lambdas_config.log_max]))
        lambdas0 = tf.random.normal((FLAGS.ensemble_size, lambdas_config.dim),
                                    lambdas_mean,
                                    0.1 * FLAGS.ens_init_delta_bounds)
        lower0 = lambdas0 - tf.constant(FLAGS.ens_init_delta_bounds)
        lower0 = tf.maximum(lower0, 1e-8)
        upper0 = lambdas0 + tf.constant(FLAGS.ens_init_delta_bounds)

        log_lower = tf.Variable(tf.math.log(lower0))
        log_upper = tf.Variable(tf.math.log(upper0))
        lambda_parameters = [log_lower, log_upper]  # these variables are tuned
        clip_lambda_parameters(lambda_parameters, lambdas_config)

        # Optimizer settings to train model weights
        # Linearly scale learning rate and the decay epochs by vanilla settings.
        # Note: Here, we don't divide the epochs by 200 as for the other uncertainty
        # baselines.
        base_lr = FLAGS.base_learning_rate * batch_size / 128
        lr_decay_epochs = [int(l) for l in FLAGS.lr_decay_epochs]

        lr_schedule = ub.schedules.WarmUpPiecewiseConstantSchedule(
            steps_per_epoch,
            base_lr,
            decay_ratio=FLAGS.lr_decay_ratio,
            decay_epochs=lr_decay_epochs,
            warmup_epochs=FLAGS.lr_warmup_epochs)
        optimizer = tf.keras.optimizers.SGD(lr_schedule,
                                            momentum=1.0 -
                                            FLAGS.one_minus_momentum,
                                            nesterov=True)

        # tuner used for optimizing lambda_parameters
        tuner = tf.keras.optimizers.Adam(FLAGS.lr_tuning)

        metrics = {
            'train/negative_log_likelihood':
            tf.keras.metrics.Mean(),
            'train/accuracy':
            tf.keras.metrics.SparseCategoricalAccuracy(),
            'train/loss':
            tf.keras.metrics.Mean(),
            'train/ece':
            rm.metrics.ExpectedCalibrationError(num_bins=FLAGS.num_bins),
            'train/disagreement':
            tf.keras.metrics.Mean(),
            'train/average_kl':
            tf.keras.metrics.Mean(),
            'train/cosine_similarity':
            tf.keras.metrics.Mean(),
            'test/negative_log_likelihood':
            tf.keras.metrics.Mean(),
            'test/accuracy':
            tf.keras.metrics.SparseCategoricalAccuracy(),
            'test/ece':
            rm.metrics.ExpectedCalibrationError(num_bins=FLAGS.num_bins),
            'test/gibbs_nll':
            tf.keras.metrics.Mean(),
            'test/gibbs_accuracy':
            tf.keras.metrics.SparseCategoricalAccuracy(),
            'test/disagreement':
            tf.keras.metrics.Mean(),
            'test/average_kl':
            tf.keras.metrics.Mean(),
            'test/cosine_similarity':
            tf.keras.metrics.Mean(),
            'validation/loss':
            tf.keras.metrics.Mean(),
            'validation/loss_entropy':
            tf.keras.metrics.Mean(),
            'validation/loss_ce':
            tf.keras.metrics.Mean()
        }
        corrupt_metrics = {}

        for i in range(FLAGS.ensemble_size):
            metrics['test/nll_member_{}'.format(i)] = tf.keras.metrics.Mean()
            metrics['test/accuracy_member_{}'.format(i)] = (
                tf.keras.metrics.SparseCategoricalAccuracy())
        if FLAGS.corruptions_interval > 0:
            for intensity in range(1, 6):
                for corruption in corruption_types:
                    dataset_name = '{0}_{1}'.format(corruption, intensity)
                    corrupt_metrics['test/nll_{}'.format(dataset_name)] = (
                        tf.keras.metrics.Mean())
                    corrupt_metrics['test/accuracy_{}'.format(
                        dataset_name)] = (
                            tf.keras.metrics.SparseCategoricalAccuracy())
                    corrupt_metrics['test/ece_{}'.format(dataset_name)] = (
                        rm.metrics.ExpectedCalibrationError(
                            num_bins=FLAGS.num_bins))

        checkpoint = tf.train.Checkpoint(model=model,
                                         lambda_parameters=lambda_parameters,
                                         optimizer=optimizer)

        latest_checkpoint = tf.train.latest_checkpoint(FLAGS.output_dir)
        initial_epoch = 0
        if latest_checkpoint and FLAGS.restore_checkpoint:
            # checkpoint.restore must be within a strategy.scope() so that optimizer
            # slot variables are mirrored.
            checkpoint.restore(latest_checkpoint)
            logging.info('Loaded checkpoint %s', latest_checkpoint)
            initial_epoch = optimizer.iterations.numpy() // steps_per_epoch

    @tf.function
    def train_step(iterator):
        """Training StepFn."""
        def step_fn(inputs):
            """Per-Replica StepFn."""
            images = inputs['features']
            labels = inputs['labels']
            images = tf.tile(images, [FLAGS.ensemble_size, 1, 1, 1])

            # generate lambdas
            lambdas = log_uniform_sample(per_core_batch_size,
                                         lambda_parameters)
            lambdas = tf.reshape(lambdas,
                                 (FLAGS.ensemble_size * per_core_batch_size,
                                  lambdas_config.dim))

            with tf.GradientTape() as tape:
                logits = model([images, lambdas], training=True)

                if FLAGS.use_gibbs_ce:
                    # Average of single model CEs
                    # tiling of labels should be only done for Gibbs CE loss
                    labels = tf.tile(labels, [FLAGS.ensemble_size])
                    negative_log_likelihood = tf.reduce_mean(
                        tf.keras.losses.sparse_categorical_crossentropy(
                            labels, logits, from_logits=True))
                else:
                    # Ensemble CE uses no tiling of the labels
                    negative_log_likelihood = ensemble_crossentropy(
                        labels, logits, FLAGS.ensemble_size)
                # Note: Divide l2_loss by sample_size (this differs from uncertainty_
                # baselines implementation.)
                l2_loss = sum(model.losses) / train_sample_size
                loss = negative_log_likelihood + l2_loss
                # Scale the loss given the TPUStrategy will reduce sum all gradients.
                scaled_loss = loss / strategy.num_replicas_in_sync

            grads = tape.gradient(scaled_loss, model.trainable_variables)

            # Separate learning rate for fast weights.
            grads_and_vars = []
            for grad, var in zip(grads, model.trainable_variables):
                if (('alpha' in var.name or 'gamma' in var.name)
                        and 'batch_norm' not in var.name):
                    grads_and_vars.append(
                        (grad * FLAGS.fast_weight_lr_multiplier, var))
                else:
                    grads_and_vars.append((grad, var))
            optimizer.apply_gradients(grads_and_vars)

            probs = tf.nn.softmax(logits)
            per_probs = tf.split(probs,
                                 num_or_size_splits=FLAGS.ensemble_size,
                                 axis=0)
            per_probs_stacked = tf.stack(per_probs, axis=0)
            metrics['train/ece'].add_batch(probs, label=labels)
            metrics['train/loss'].update_state(loss)
            metrics['train/negative_log_likelihood'].update_state(
                negative_log_likelihood)
            metrics['train/accuracy'].update_state(labels, logits)
            diversity = rm.metrics.AveragePairwiseDiversity()
            diversity.add_batch(per_probs_stacked,
                                num_models=FLAGS.ensemble_size)
            diversity_results = diversity.result()
            for k, v in diversity_results.items():
                metrics['train/' + k].update_state(v)

        strategy.run(step_fn, args=(next(iterator), ))

    @tf.function
    def tuning_step(iterator):
        """Tuning StepFn."""
        def step_fn(inputs):
            """Per-Replica StepFn."""
            images = inputs['features']
            labels = inputs['labels']
            images = tf.tile(images, [FLAGS.ensemble_size, 1, 1, 1])

            with tf.GradientTape(watch_accessed_variables=False) as tape:
                tape.watch(lambda_parameters)

                # sample lambdas
                if FLAGS.sample_and_tune:
                    lambdas = log_uniform_sample(per_core_batch_size,
                                                 lambda_parameters)
                else:
                    lambdas = log_uniform_mean(lambda_parameters)
                    lambdas = tf.repeat(lambdas, per_core_batch_size, axis=0)
                lambdas = tf.reshape(lambdas,
                                     (FLAGS.ensemble_size *
                                      per_core_batch_size, lambdas_config.dim))
                # ensemble CE
                logits = model([images, lambdas], training=False)
                ce = ensemble_crossentropy(labels, logits, FLAGS.ensemble_size)
                # entropy penalty for lambda distribution
                entropy = FLAGS.tau * log_uniform_entropy(lambda_parameters)
                loss = ce - entropy
                scaled_loss = loss / strategy.num_replicas_in_sync

            gradients = tape.gradient(scaled_loss, lambda_parameters)
            tuner.apply_gradients(zip(gradients, lambda_parameters))

            metrics['validation/loss_ce'].update_state(
                ce / strategy.num_replicas_in_sync)
            metrics['validation/loss_entropy'].update_state(
                entropy / strategy.num_replicas_in_sync)
            metrics['validation/loss'].update_state(scaled_loss)

        strategy.run(step_fn, args=(next(iterator), ))

    @tf.function
    def test_step(iterator, dataset_name, num_eval_samples=0):
        """Evaluation StepFn."""

        n_samples = num_eval_samples if num_eval_samples >= 0 else -num_eval_samples
        if num_eval_samples >= 0:
            # the +1 accounts for the fact that we add the mean of lambdas
            ensemble_size = FLAGS.ensemble_size * (1 + n_samples)
        else:
            ensemble_size = FLAGS.ensemble_size * n_samples

        def step_fn(inputs):
            """Per-Replica StepFn."""
            # Note that we don't use tf.tile for labels here
            images = inputs['features']
            labels = inputs['labels']
            images = tf.tile(images, [ensemble_size, 1, 1, 1])

            # get lambdas
            samples = log_uniform_sample(n_samples, lambda_parameters)
            if num_eval_samples >= 0:
                lambdas = log_uniform_mean(lambda_parameters)
                lambdas = tf.expand_dims(lambdas, 1)
                lambdas = tf.concat((lambdas, samples), 1)
            else:
                lambdas = samples

            # lambdas with shape (ens size, samples, dim of lambdas)
            rep_lambdas = tf.repeat(lambdas, per_core_batch_size, axis=1)
            rep_lambdas = tf.reshape(rep_lambdas,
                                     (ensemble_size * per_core_batch_size, -1))

            # eval on testsets
            logits = model([images, rep_lambdas], training=False)
            probs = tf.nn.softmax(logits)
            per_probs = tf.split(probs,
                                 num_or_size_splits=ensemble_size,
                                 axis=0)

            # per member performance and gibbs performance (average per member perf)
            if dataset_name == 'clean':
                for i in range(FLAGS.ensemble_size):
                    # we record the first sample of lambdas per batch-ens member
                    first_member_index = i * (ensemble_size //
                                              FLAGS.ensemble_size)
                    member_probs = per_probs[first_member_index]
                    member_loss = tf.keras.losses.sparse_categorical_crossentropy(
                        labels, member_probs)
                    metrics['test/nll_member_{}'.format(i)].update_state(
                        member_loss)
                    metrics['test/accuracy_member_{}'.format(i)].update_state(
                        labels, member_probs)

                labels_tile = tf.tile(labels, [ensemble_size])
                metrics['test/gibbs_nll'].update_state(
                    tf.reduce_mean(
                        tf.keras.losses.sparse_categorical_crossentropy(
                            labels_tile, logits, from_logits=True)))
                metrics['test/gibbs_accuracy'].update_state(labels_tile, probs)

            # ensemble performance
            negative_log_likelihood = ensemble_crossentropy(
                labels, logits, ensemble_size)
            probs = tf.reduce_mean(per_probs, axis=0)
            if dataset_name == 'clean':
                metrics['test/negative_log_likelihood'].update_state(
                    negative_log_likelihood)
                metrics['test/accuracy'].update_state(labels, probs)
                metrics['test/ece'].add_batch(probs, label=labels)
            else:
                corrupt_metrics['test/nll_{}'.format(
                    dataset_name)].update_state(negative_log_likelihood)
                corrupt_metrics['test/accuracy_{}'.format(
                    dataset_name)].update_state(labels, probs)
                corrupt_metrics['test/ece_{}'.format(dataset_name)].add_batch(
                    probs, label=labels)

            if dataset_name == 'clean':
                per_probs_stacked = tf.stack(per_probs, axis=0)
                diversity = rm.metrics.AveragePairwiseDiversity()
                diversity.add_batch(per_probs_stacked,
                                    num_models=ensemble_size)
                diversity_results = diversity.result()
                for k, v in diversity_results.items():
                    metrics['test/' + k].update_state(v)

        strategy.run(step_fn, args=(next(iterator), ))

    logging.info('--- Starting training using %d examples. ---',
                 train_sample_size)
    train_iterator = iter(train_dataset)
    validation_iterator = iter(validation_dataset)
    start_time = time.time()
    for epoch in range(initial_epoch, FLAGS.train_epochs):
        logging.info('Starting to run epoch: %s', epoch)
        for step in range(steps_per_epoch):
            train_step(train_iterator)
            do_tuning = (epoch >= FLAGS.tuning_warmup_epochs)
            if do_tuning and ((step + 1) % FLAGS.tuning_every_x_step == 0):
                tuning_step(validation_iterator)
                # clip lambda parameters if outside of range
                clip_lambda_parameters(lambda_parameters, lambdas_config)

            current_step = epoch * steps_per_epoch + (step + 1)
            max_steps = steps_per_epoch * FLAGS.train_epochs
            time_elapsed = time.time() - start_time
            steps_per_sec = float(current_step) / time_elapsed
            eta_seconds = (max_steps - current_step) / steps_per_sec
            message = ('{:.1%} completion: epoch {:d}/{:d}. {:.1f} steps/s. '
                       'ETA: {:.0f} min. Time elapsed: {:.0f} min'.format(
                           current_step / max_steps, epoch + 1,
                           FLAGS.train_epochs, steps_per_sec, eta_seconds / 60,
                           time_elapsed / 60))
            if step % 20 == 0:
                logging.info(message)

        # evaluate on test data
        datasets_to_evaluate = {'clean': test_datasets['clean']}
        if (FLAGS.corruptions_interval > 0
                and (epoch + 1) % FLAGS.corruptions_interval == 0):
            datasets_to_evaluate = test_datasets
        for dataset_name, test_dataset in datasets_to_evaluate.items():
            test_iterator = iter(test_dataset)
            logging.info('Testing on dataset %s', dataset_name)
            for step in range(steps_per_eval):
                if step % 20 == 0:
                    logging.info('Starting to run eval step %s of epoch: %s',
                                 step, epoch)
                test_step(test_iterator, dataset_name, FLAGS.num_eval_samples)
            logging.info('Done with testing on %s', dataset_name)

        corrupt_results = {}
        if (FLAGS.corruptions_interval > 0
                and (epoch + 1) % FLAGS.corruptions_interval == 0):
            corrupt_results = utils.aggregate_corrupt_metrics(
                corrupt_metrics, corruption_types)
        logging.info('Train Loss: %.4f, Accuracy: %.2f%%',
                     metrics['train/loss'].result(),
                     metrics['train/accuracy'].result() * 100)
        logging.info('Validation Loss: %.4f, CE: %.4f, Entropy: %.4f',
                     metrics['validation/loss'].result(),
                     metrics['validation/loss_ce'].result(),
                     metrics['validation/loss_entropy'].result())
        logging.info('Test NLL: %.4f, Accuracy: %.2f%%',
                     metrics['test/negative_log_likelihood'].result(),
                     metrics['test/accuracy'].result() * 100)
        for i in range(FLAGS.ensemble_size):
            logging.info(
                'Member %d Test Loss: %.4f, Accuracy: %.2f%%', i,
                metrics['test/nll_member_{}'.format(i)].result(),
                metrics['test/accuracy_member_{}'.format(i)].result() * 100)

        total_results = {
            name: metric.result()
            for name, metric in metrics.items()
        }
        total_results.update({
            name: metric.result()
            for name, metric in corrupt_metrics.items()
        })
        total_results.update(corrupt_results)
        # Metrics from Robustness Metrics (like ECE) will return a dict with a
        # single key/value, instead of a scalar.
        total_results = {
            k: (list(v.values())[0] if isinstance(v, dict) else v)
            for k, v in total_results.items()
        }
        with summary_writer.as_default():
            for name, result in total_results.items():
                tf.summary.scalar(name, result, step=epoch + 1)

        for metric in metrics.values():
            metric.reset_states()

        # save checkpoint and lambdas config
        if (FLAGS.checkpoint_interval > 0
                and (epoch + 1) % FLAGS.checkpoint_interval == 0):
            checkpoint_name = checkpoint.save(
                os.path.join(FLAGS.output_dir, 'checkpoint'))
            lambdas_cf = lambdas_config.get_config()
            filepath = os.path.join(FLAGS.output_dir, 'lambdas_config.p')
            with tf.io.gfile.GFile(filepath, 'wb') as fp:
                pickle.dump(lambdas_cf, fp, protocol=pickle.HIGHEST_PROTOCOL)
            logging.info('Saved checkpoint to %s', checkpoint_name)
    with summary_writer.as_default():
        hp.hparams({
            'base_learning_rate':
            FLAGS.base_learning_rate,
            'one_minus_momentum':
            FLAGS.one_minus_momentum,
            'l2':
            FLAGS.l2,
            'random_sign_init':
            FLAGS.random_sign_init,
            'fast_weight_lr_multiplier':
            FLAGS.fast_weight_lr_multiplier,
        })
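
`log_uniform_sample`, `log_uniform_mean`, and `log_uniform_entropy` above come from the original hyper-batchensemble utilities and are not shown in this snippet. A plausible minimal sketch of the first two, assuming `lambda_parameters = [log_lower, log_upper]` with shape [ensemble_size, lambdas_dim] each:

import tensorflow as tf

def log_uniform_sample(num_samples, lambda_parameters):
    """Draws lambdas ~ LogUniform(lower, upper); shape [ens, samples, dim]."""
    log_lower, log_upper = lambda_parameters
    ens_size, dim = log_lower.shape
    u = tf.random.uniform((ens_size, num_samples, dim),
                          dtype=log_lower.dtype)
    # Uniform in log-space between the (learned) lower and upper bounds.
    log_sample = (tf.expand_dims(log_lower, 1) +
                  u * tf.expand_dims(log_upper - log_lower, 1))
    return tf.exp(log_sample)

def log_uniform_mean(lambda_parameters):
    """E[lambda] = (upper - lower) / (log upper - log lower)."""
    log_lower, log_upper = lambda_parameters
    lower, upper = tf.exp(log_lower), tf.exp(log_upper)
    return (upper - lower) / tf.maximum(log_upper - log_lower, 1e-12)
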
Example #21
def main(argv):
    del argv  # unused arg
    if not FLAGS.use_gpu:
        raise ValueError('Only GPU is currently supported.')
    if FLAGS.num_cores > 1:
        raise ValueError('Only a single accelerator is currently supported.')
    tf.random.set_seed(FLAGS.seed)
    tf.io.gfile.makedirs(FLAGS.output_dir)

    ds_info = tfds.builder(FLAGS.dataset).info
    batch_size = FLAGS.per_core_batch_size * FLAGS.num_cores
    steps_per_eval = ds_info.splits['test'].num_examples // batch_size
    num_classes = ds_info.features['label'].num_classes

    data_dir = FLAGS.data_dir
    dataset = ub.datasets.get(
        FLAGS.dataset,
        data_dir=data_dir,
        download_data=FLAGS.download_data,
        split=tfds.Split.TEST).load(batch_size=batch_size)
    validation_percent = 1. - FLAGS.train_proportion
    val_dataset = ub.datasets.get(
        dataset_name=FLAGS.dataset,
        data_dir=data_dir,
        download_data=FLAGS.download_data,
        split=tfds.Split.VALIDATION,
        validation_percent=validation_percent,
        drop_remainder=False).load(batch_size=batch_size)
    steps_per_val_eval = int(ds_info.splits['train'].num_examples *
                             validation_percent) // batch_size

    test_datasets = {'clean': dataset}
    if FLAGS.dataset == 'cifar100':
        data_dir = FLAGS.cifar100_c_path
    corruption_types, _ = utils.load_corrupted_test_info(FLAGS.dataset)
    for corruption_type in corruption_types:
        for severity in range(1, 6):
            dataset = ub.datasets.get(
                f'{FLAGS.dataset}_corrupted',
                corruption_type=corruption_type,
                data_dir=data_dir,
                severity=severity,
                split=tfds.Split.TEST).load(batch_size=batch_size)
            test_datasets[f'{corruption_type}_{severity}'] = dataset

    model = ub.models.wide_resnet(input_shape=ds_info.features['image'].shape,
                                  depth=28,
                                  width_multiplier=10,
                                  num_classes=num_classes,
                                  l2=0.)
    logging.info('Model input shape: %s', model.input_shape)
    logging.info('Model output shape: %s', model.output_shape)
    logging.info('Model number of weights: %s', model.count_params())

    # Search for checkpoints
    ensemble_filenames = parse_checkpoint_dir(FLAGS.checkpoint_dir)

    model_pool_size = len(ensemble_filenames)
    logging.info('Model pool size: %s', model_pool_size)
    logging.info('Ensemble size: %s', FLAGS.ensemble_size)
    logging.info('Ensemble number of weights: %s',
                 FLAGS.ensemble_size * model.count_params())
    logging.info('Ensemble filenames: %s', str(ensemble_filenames))
    checkpoint = tf.train.Checkpoint(model=model)

    # Compute the logits on the validation set
    val_logits, val_labels = [], []
    for m, ensemble_filename in enumerate(ensemble_filenames):
        # Enforce memory clean-up
        tf.keras.backend.clear_session()
        checkpoint.restore(ensemble_filename)
        val_iterator = iter(val_dataset)
        val_logits_m = []
        for _ in range(steps_per_val_eval):
            inputs = next(val_iterator)
            features = inputs['features']
            labels = inputs['labels']
            val_logits_m.append(model(features, training=False))
            if m == 0:
                val_labels.append(labels)

        val_logits.append(tf.concat(val_logits_m, axis=0))
        if m == 0:
            val_labels = tf.concat(val_labels, axis=0)

        percent = (m + 1.) / model_pool_size
        message = ('{:.1%} completion for prediction on validation set: '
                   'model {:d}/{:d}.'.format(percent, m + 1, model_pool_size))
        logging.info(message)

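    # `greedy_selection` (defined earlier in this script) is assumed to
    # implement Caruana-style greedy construction: starting from an empty
    # ensemble, it repeatedly adds (with replacement) the pool member whose
    # inclusion most improves the validation objective, so `selected_members`
    # below can contain duplicates.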
    selected_members, val_acc, val_nll = greedy_selection(
        val_logits, val_labels, FLAGS.ensemble_size, FLAGS.greedy_objective)
    unique_selected_members = list(set(selected_members))
    message = ('Members selected by greedy procedure: {} (with {} unique '
               'member(s))\n\t{}').format(
                   selected_members, len(unique_selected_members),
                   [ensemble_filenames[i] for i in selected_members])
    logging.info(message)
    val_metrics = {
        'val/accuracy': tf.keras.metrics.Mean(),
        'val/negative_log_likelihood': tf.keras.metrics.Mean()
    }
    val_metrics['val/accuracy'].update_state(val_acc)
    val_metrics['val/negative_log_likelihood'].update_state(val_nll)

    # Write model predictions to files.
    num_datasets = len(test_datasets)
    for m, member_id in enumerate(unique_selected_members):
        ensemble_filename = ensemble_filenames[member_id]
        checkpoint.restore(ensemble_filename)
        for n, (name, test_dataset) in enumerate(test_datasets.items()):
            filename = '{dataset}_{member}.npy'.format(dataset=name,
                                                       member=member_id)
            filename = os.path.join(FLAGS.output_dir, filename)
            if not tf.io.gfile.exists(filename):
                logits = []
                test_iterator = iter(test_dataset)
                for _ in range(steps_per_eval):
                    features = next(test_iterator)['features']  # pytype: disable=unsupported-operands
                    logits.append(model(features, training=False))

                logits = tf.concat(logits, axis=0)
                with tf.io.gfile.GFile(filename, 'wb') as f:  # binary mode for np.save
                    np.save(f, logits.numpy())

            numerator = m * num_datasets + (n + 1)
            denominator = len(unique_selected_members) * num_datasets
            percent = numerator / denominator
            message = (
                '{:.1%} completion for prediction: ensemble member {:d}/{:d}. '
                'Dataset {:d}/{:d}'.format(percent, m + 1,
                                           len(unique_selected_members), n + 1,
                                           num_datasets))
            logging.info(message)

    metrics = {
        'test/negative_log_likelihood': tf.keras.metrics.Mean(),
        'test/gibbs_cross_entropy': tf.keras.metrics.Mean(),
        'test/accuracy': tf.keras.metrics.SparseCategoricalAccuracy(),
        'test/ece':
        rm.metrics.ExpectedCalibrationError(num_bins=FLAGS.num_bins),
        'test/diversity': rm.metrics.AveragePairwiseDiversity(),
    }
    metrics.update(val_metrics)
    corrupt_metrics = {}
    for name in test_datasets:
        corrupt_metrics['test/nll_{}'.format(name)] = tf.keras.metrics.Mean()
        corrupt_metrics['test/accuracy_{}'.format(name)] = (
            tf.keras.metrics.SparseCategoricalAccuracy())
        corrupt_metrics['test/ece_{}'.format(name)] = (
            rm.metrics.ExpectedCalibrationError(num_bins=FLAGS.num_bins))
    for member_id in unique_selected_members:
        # Key member metrics by the member's first index in selected_members,
        # matching the index lookup used during evaluation below.
        i = selected_members.index(member_id)
        metrics['test/nll_member_{}'.format(i)] = tf.keras.metrics.Mean()
        metrics['test/accuracy_member_{}'.format(i)] = (
            tf.keras.metrics.SparseCategoricalAccuracy())

    # Evaluate model predictions.
    for n, (name, test_dataset) in enumerate(test_datasets.items()):
        logits_dataset = []
        for member_id in selected_members:
            filename = '{dataset}_{member}.npy'.format(dataset=name,
                                                       member=member_id)
            filename = os.path.join(FLAGS.output_dir, filename)
            with tf.io.gfile.GFile(filename, 'rb') as f:
                logits_dataset.append(np.load(f))

        logits_dataset = tf.convert_to_tensor(logits_dataset)
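        # The saved logits follow the dataset iteration order, so each eval
        # step below slices the batch_size window matching what the iterator
        # yields at that step.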
        test_iterator = iter(test_dataset)
        for step in range(steps_per_eval):
            labels = next(test_iterator)['labels']  # pytype: disable=unsupported-operands
            logits = logits_dataset[:, (step * batch_size):((step + 1) *
                                                            batch_size)]
            labels = tf.cast(labels, tf.int32)
            negative_log_likelihood_metric = rm.metrics.EnsembleCrossEntropy()
            negative_log_likelihood_metric.add_batch(logits, labels=labels)
            negative_log_likelihood = list(
                negative_log_likelihood_metric.result().values())[0]
            per_probs = tf.nn.softmax(logits)
            probs = tf.reduce_mean(per_probs, axis=0)
            if name == 'clean':
                gibbs_ce_metric = rm.metrics.GibbsCrossEntropy()
                gibbs_ce_metric.add_batch(logits, labels=labels)
                gibbs_ce = list(gibbs_ce_metric.result().values())[0]
                metrics['test/negative_log_likelihood'].update_state(
                    negative_log_likelihood)
                metrics['test/gibbs_cross_entropy'].update_state(gibbs_ce)
                metrics['test/accuracy'].update_state(labels, probs)
                metrics['test/ece'].add_batch(probs, label=labels)

                # Care is needed to handle duplicated members, e.g.:
                #     selected_members = [2, 7, 3, 3]
                #     unique_selected_members = [2, 3, 7]
                #     selected_members.index(3) --> 2
                for member_id in unique_selected_members:
                    i = selected_members.index(member_id)
                    member_probs = per_probs[i]
                    member_loss = tf.keras.losses.sparse_categorical_crossentropy(
                        labels, member_probs)
                    metrics['test/nll_member_{}'.format(i)].update_state(
                        member_loss)
                    metrics['test/accuracy_member_{}'.format(i)].update_state(
                        labels, member_probs)
                metrics['test/diversity'].add_batch(per_probs)
            else:
                corrupt_metrics['test/nll_{}'.format(name)].update_state(
                    negative_log_likelihood)
                corrupt_metrics['test/accuracy_{}'.format(name)].update_state(
                    labels, probs)
                corrupt_metrics['test/ece_{}'.format(name)].add_batch(
                    probs, label=labels)

        message = (
            '{:.1%} completion for evaluation: dataset {:d}/{:d}'.format(
                (n + 1) / num_datasets, n + 1, num_datasets))
        logging.info(message)

    corrupt_results = utils.aggregate_corrupt_metrics(corrupt_metrics,
                                                      corruption_types)
    total_results = {name: metric.result() for name, metric in metrics.items()}
    total_results.update(corrupt_results)
    # Results from Robustness Metrics themselves return a dict, so flatten them.
    total_results = utils.flatten_dictionary(total_results)

    logging.info('Metrics: %s', total_results)
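For reference, a minimal sketch of the greedy selection step used above, assuming a Caruana-style procedure under an NLL objective. This is an illustrative stand-in, not the script's actual greedy_selection (the accuracy objective selected by FLAGS.greedy_objective is omitted for brevity):

import tensorflow as tf

def greedy_selection_sketch(val_logits, val_labels, ensemble_size):
    """Greedily grows an ensemble (with replacement), minimizing val NLL."""
    val_labels = tf.cast(val_labels, tf.int32)
    selected = []
    # Running sum of member probabilities for the ensemble built so far.
    probs_sum = tf.zeros_like(tf.nn.softmax(val_logits[0]))
    for _ in range(ensemble_size):
        best_member, best_nll = None, float('inf')
        for m, logits in enumerate(val_logits):
            # Candidate ensemble = current members plus member m.
            probs = (probs_sum + tf.nn.softmax(logits)) / (len(selected) + 1)
            nll = float(tf.reduce_mean(
                tf.keras.losses.sparse_categorical_crossentropy(
                    val_labels, probs)))
            if nll < best_nll:
                best_member, best_nll = m, nll
        selected.append(best_member)
        probs_sum += tf.nn.softmax(val_logits[best_member])
    ensemble_probs = probs_sum / len(selected)
    accuracy = float(tf.reduce_mean(tf.cast(
        tf.equal(tf.argmax(ensemble_probs, axis=-1, output_type=tf.int32),
                 val_labels), tf.float32)))
    return selected, accuracy, best_nll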
Example #22
def main(argv):
    del argv  # unused arg

    tf.io.gfile.makedirs(FLAGS.output_dir)
    logging.info('Saving checkpoints at %s', FLAGS.output_dir)
    tf.random.set_seed(FLAGS.seed)

    batch_size = FLAGS.per_core_batch_size * FLAGS.num_cores
    steps_per_epoch = APPROX_IMAGENET_TRAIN_IMAGES // batch_size
    steps_per_eval = IMAGENET_VALIDATION_IMAGES // batch_size

    if FLAGS.use_gpu:
        logging.info('Use GPU')
        strategy = tf.distribute.MirroredStrategy()
    else:
        logging.info('Use TPU at %s',
                     FLAGS.tpu if FLAGS.tpu is not None else 'local')
        resolver = tf.distribute.cluster_resolver.TPUClusterResolver(
            tpu=FLAGS.tpu)
        tf.config.experimental_connect_to_cluster(resolver)
        tf.tpu.experimental.initialize_tpu_system(resolver)
        strategy = tf.distribute.experimental.TPUStrategy(resolver)

    imagenet_train = utils.ImageNetInput(is_training=True,
                                         data_dir=FLAGS.data_dir,
                                         batch_size=FLAGS.per_core_batch_size,
                                         use_bfloat16=FLAGS.use_bfloat16)
    imagenet_eval = utils.ImageNetInput(is_training=False,
                                        data_dir=FLAGS.data_dir,
                                        batch_size=FLAGS.per_core_batch_size,
                                        use_bfloat16=FLAGS.use_bfloat16)
    test_datasets = {
        'clean':
        strategy.experimental_distribute_datasets_from_function(
            imagenet_eval.input_fn)
    }
    if FLAGS.corruptions_interval > 0:
        corruption_types, max_intensity = utils.load_corrupted_test_info()
        for name in corruption_types:
            for intensity in range(1, max_intensity + 1):
                dataset_name = '{0}_{1}'.format(name, intensity)
                corrupt_input_fn = utils.corrupt_test_input_fn(
                    batch_size=FLAGS.per_core_batch_size,
                    corruption_name=name,
                    corruption_intensity=intensity,
                    use_bfloat16=FLAGS.use_bfloat16)
                test_datasets[dataset_name] = (
                    strategy.experimental_distribute_datasets_from_function(
                        corrupt_input_fn))

    train_dataset = strategy.experimental_distribute_datasets_from_function(
        imagenet_train.input_fn)

    if FLAGS.use_bfloat16:
        policy = tf.keras.mixed_precision.experimental.Policy('mixed_bfloat16')
        tf.keras.mixed_precision.experimental.set_policy(policy)

    with strategy.scope():
        logging.info('Building Keras ResNet-50 model')
        model = ub.models.resnet50_sngp(
            input_shape=(224, 224, 3),
            batch_size=None,
            num_classes=NUM_CLASSES,
            use_mc_dropout=FLAGS.use_mc_dropout,
            dropout_rate=FLAGS.dropout_rate,
            filterwise_dropout=FLAGS.filterwise_dropout,
            use_gp_layer=FLAGS.use_gp_layer,
            gp_hidden_dim=FLAGS.gp_hidden_dim,
            gp_scale=FLAGS.gp_scale,
            gp_bias=FLAGS.gp_bias,
            gp_input_normalization=FLAGS.gp_input_normalization,
            gp_cov_discount_factor=FLAGS.gp_cov_discount_factor,
            gp_cov_ridge_penalty=FLAGS.gp_cov_ridge_penalty,
            gp_output_imagenet_initializer=FLAGS.gp_output_imagenet_initializer,
            use_spec_norm=FLAGS.use_spec_norm,
            spec_norm_iteration=FLAGS.spec_norm_iteration,
            spec_norm_bound=FLAGS.spec_norm_bound)
        logging.info('Model input shape: %s', model.input_shape)
        logging.info('Model output shape: %s', model.output_shape)
        logging.info('Model number of weights: %s', model.count_params())
        # Scale learning rate and decay epochs by vanilla settings.
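        # (Linear scaling rule: the learning rate grows proportionally with
        # the global batch size, with 256 as the reference batch size.)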
        base_lr = FLAGS.base_learning_rate * batch_size / 256
        learning_rate = utils.LearningRateSchedule(steps_per_epoch, base_lr,
                                                   FLAGS.train_epochs,
                                                   _LR_SCHEDULE)
        optimizer = tf.keras.optimizers.SGD(learning_rate=learning_rate,
                                            momentum=0.9,
                                            nesterov=True)
        metrics = {
            'train/negative_log_likelihood': tf.keras.metrics.Mean(),
            'train/accuracy': tf.keras.metrics.SparseCategoricalAccuracy(),
            'train/loss': tf.keras.metrics.Mean(),
            'train/ece': um.ExpectedCalibrationError(num_bins=FLAGS.num_bins),
            'test/negative_log_likelihood': tf.keras.metrics.Mean(),
            'test/accuracy': tf.keras.metrics.SparseCategoricalAccuracy(),
            'test/ece': um.ExpectedCalibrationError(num_bins=FLAGS.num_bins),
            'test/stddev': tf.keras.metrics.Mean(),
        }
        if FLAGS.corruptions_interval > 0:
            corrupt_metrics = {}
            for intensity in range(1, max_intensity + 1):
                for corruption in corruption_types:
                    dataset_name = '{0}_{1}'.format(corruption, intensity)
                    corrupt_metrics['test/nll_{}'.format(dataset_name)] = (
                        tf.keras.metrics.Mean())
                    corrupt_metrics['test/accuracy_{}'.format(
                        dataset_name)] = (
                            tf.keras.metrics.SparseCategoricalAccuracy())
                    corrupt_metrics['test/ece_{}'.format(dataset_name)] = (
                        um.ExpectedCalibrationError(num_bins=FLAGS.num_bins))
                    corrupt_metrics['test/stddev_{}'.format(dataset_name)] = (
                        tf.keras.metrics.Mean())

        logging.info('Finished building Keras ResNet-50 model')

        checkpoint = tf.train.Checkpoint(model=model, optimizer=optimizer)
        latest_checkpoint = tf.train.latest_checkpoint(FLAGS.output_dir)
        initial_epoch = 0
        if latest_checkpoint:
            # checkpoint.restore must be within a strategy.scope() so that optimizer
            # slot variables are mirrored.
            checkpoint.restore(latest_checkpoint)
            logging.info('Loaded checkpoint %s', latest_checkpoint)
            initial_epoch = optimizer.iterations.numpy() // steps_per_epoch

    summary_writer = tf.summary.create_file_writer(
        os.path.join(FLAGS.output_dir, 'summaries'))

    @tf.function
    def train_step(iterator):
        """Training StepFn."""
        def step_fn(inputs):
            """Per-Replica StepFn."""
            images, labels = inputs
            with tf.GradientTape() as tape:
                logits = model(images, training=True)

                if isinstance(logits, tuple):
                    # If model returns a tuple of (logits, covmat), extract logits
                    logits, _ = logits
                if FLAGS.use_bfloat16:
                    logits = tf.cast(logits, tf.float32)

                negative_log_likelihood = tf.reduce_mean(
                    tf.keras.losses.sparse_categorical_crossentropy(
                        labels, logits, from_logits=True))
                filtered_variables = []
                for var in model.trainable_variables:
                    # Apply l2 to kernel and bias variables; BN parameters
                    # (gamma/beta) are excluded, so pay attention to the
                    # variable naming scheme.
                    if 'kernel' in var.name or 'bias' in var.name:
                        filtered_variables.append(tf.reshape(var, (-1, )))

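                # tf.nn.l2_loss computes sum(v ** 2) / 2, so the factor of 2
                # recovers the plain sum of squared weights before scaling
                # by FLAGS.l2.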
                l2_loss = FLAGS.l2 * 2 * tf.nn.l2_loss(
                    tf.concat(filtered_variables, axis=0))
                # Scale the loss given the TPUStrategy will reduce sum all gradients.
                loss = negative_log_likelihood + l2_loss
                scaled_loss = loss / strategy.num_replicas_in_sync

            grads = tape.gradient(scaled_loss, model.trainable_variables)
            optimizer.apply_gradients(zip(grads, model.trainable_variables))

            probs = tf.nn.softmax(logits)
            metrics['train/ece'].update_state(labels, probs)
            metrics['train/loss'].update_state(loss)
            metrics['train/negative_log_likelihood'].update_state(
                negative_log_likelihood)
            metrics['train/accuracy'].update_state(labels, logits)

        strategy.run(step_fn, args=(next(iterator), ))

    @tf.function
    def test_step(iterator, dataset_name):
        """Evaluation StepFn."""
        def step_fn(inputs):
            """Per-Replica StepFn."""
            images, labels = inputs

            logits_list = []
            stddev_list = []
            for _ in range(FLAGS.num_dropout_samples):
                logits = model(images, training=False)

                if isinstance(logits, tuple):
                    # If model returns a tuple of (logits, covmat), extract both
                    logits, covmat = logits
                else:
                    covmat = tf.eye(FLAGS.per_core_batch_size)

                if FLAGS.use_bfloat16:
                    logits = tf.cast(logits, tf.float32)

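                # Mean-field approximation to the GP posterior predictive:
                # the logits are scaled by 1 / sqrt(1 + lambda * var), where
                # var is the diagonal of covmat and lambda is
                # FLAGS.gp_mean_field_factor, avoiding Monte Carlo sampling
                # of the GP posterior.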
                logits = ed.layers.utils.mean_field_logits(
                    logits,
                    covmat,
                    mean_field_factor=FLAGS.gp_mean_field_factor)
                stddev = tf.sqrt(tf.linalg.diag_part(covmat))

                stddev_list.append(stddev)
                logits_list.append(logits)

            # Logits dimension is (num_samples, batch_size, num_classes).
            logits_list = tf.stack(logits_list, axis=0)
            stddev_list = tf.stack(stddev_list, axis=0)

            stddev = tf.reduce_mean(stddev_list, axis=0)
            probs_list = tf.nn.softmax(logits_list)
            probs = tf.reduce_mean(probs_list, axis=0)

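            # NLL of the Monte Carlo mixture: log p(y|x) is estimated as
            # logsumexp_s log p(y|x, theta_s) - log(S), then averaged over
            # the batch.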
            labels_broadcasted = tf.broadcast_to(
                labels, [FLAGS.num_dropout_samples, labels.shape[0]])
            log_likelihoods = -tf.keras.losses.sparse_categorical_crossentropy(
                labels_broadcasted, logits_list, from_logits=True)
            negative_log_likelihood = tf.reduce_mean(
                -tf.reduce_logsumexp(log_likelihoods, axis=[0]) +
                tf.math.log(float(FLAGS.num_dropout_samples)))

            if dataset_name == 'clean':
                metrics['test/negative_log_likelihood'].update_state(
                    negative_log_likelihood)
                metrics['test/accuracy'].update_state(labels, probs)
                metrics['test/ece'].update_state(labels, probs)
                metrics['test/stddev'].update_state(stddev)
            else:
                corrupt_metrics['test/nll_{}'.format(
                    dataset_name)].update_state(negative_log_likelihood)
                corrupt_metrics['test/accuracy_{}'.format(
                    dataset_name)].update_state(labels, probs)
                corrupt_metrics['test/ece_{}'.format(
                    dataset_name)].update_state(labels, probs)
                corrupt_metrics['test/stddev_{}'.format(
                    dataset_name)].update_state(stddev)

        strategy.run(step_fn, args=(next(iterator), ))

    metrics.update({'test/ms_per_example': tf.keras.metrics.Mean()})

    train_iterator = iter(train_dataset)
    start_time = time.time()
    for epoch in range(initial_epoch, FLAGS.train_epochs):
        logging.info('Starting to run epoch: %s', epoch)
        for step in range(steps_per_epoch):
            train_step(train_iterator)

            current_step = epoch * steps_per_epoch + (step + 1)
            max_steps = steps_per_epoch * FLAGS.train_epochs
            time_elapsed = time.time() - start_time
            steps_per_sec = float(current_step) / time_elapsed
            eta_seconds = (max_steps - current_step) / steps_per_sec
            message = ('{:.1%} completion: epoch {:d}/{:d}. {:.1f} steps/s. '
                       'ETA: {:.0f} min. Time elapsed: {:.0f} min'.format(
                           current_step / max_steps, epoch + 1,
                           FLAGS.train_epochs, steps_per_sec, eta_seconds / 60,
                           time_elapsed / 60))
            if step % 20 == 0:
                logging.info(message)

        datasets_to_evaluate = {'clean': test_datasets['clean']}
        if (FLAGS.corruptions_interval > 0
                and (epoch + 1) % FLAGS.corruptions_interval == 0):
            datasets_to_evaluate = test_datasets
        for dataset_name, test_dataset in datasets_to_evaluate.items():
            test_iterator = iter(test_dataset)
            logging.info('Testing on dataset %s', dataset_name)
            for step in range(steps_per_eval):
                if step % 20 == 0:
                    logging.info('Starting to run eval step %s of epoch: %s',
                                 step, epoch)
                test_start_time = time.time()
                test_step(test_iterator, dataset_name)
                # Convert seconds to milliseconds per example.
                ms_per_example = (time.time() -
                                  test_start_time) * 1e3 / batch_size
                metrics['test/ms_per_example'].update_state(ms_per_example)

            logging.info('Done with testing on %s', dataset_name)

        corrupt_results = {}
        if (FLAGS.corruptions_interval > 0
                and (epoch + 1) % FLAGS.corruptions_interval == 0):
            corrupt_results = utils.aggregate_corrupt_metrics(
                corrupt_metrics, corruption_types, max_intensity,
                FLAGS.alexnet_errors_path)

        logging.info('Train Loss: %.4f, Accuracy: %.2f%%',
                     metrics['train/loss'].result(),
                     metrics['train/accuracy'].result() * 100)
        logging.info('Test NLL: %.4f, Accuracy: %.2f%%',
                     metrics['test/negative_log_likelihood'].result(),
                     metrics['test/accuracy'].result() * 100)
        total_results = {
            name: metric.result()
            for name, metric in metrics.items()
        }
        total_results.update(corrupt_results)
        with summary_writer.as_default():
            for name, result in total_results.items():
                tf.summary.scalar(name, result, step=epoch + 1)

        for metric in metrics.values():
            metric.reset_states()

        if (FLAGS.checkpoint_interval > 0
                and (epoch + 1) % FLAGS.checkpoint_interval == 0):
            checkpoint_name = checkpoint.save(
                os.path.join(FLAGS.output_dir, 'checkpoint'))
            logging.info('Saved checkpoint to %s', checkpoint_name)

    # Save final checkpoint.
    final_checkpoint_name = checkpoint.save(
        os.path.join(FLAGS.output_dir, 'checkpoint'))
    logging.info('Saved last checkpoint to %s', final_checkpoint_name)

    # Export final model as SavedModel.
    final_save_name = os.path.join(FLAGS.output_dir, 'model')
    model.save(final_save_name)
    logging.info('Saved model to %s', final_save_name)
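For intuition, a minimal sketch of the adjustment ed.layers.utils.mean_field_logits is assumed to apply in the test step above (an illustrative reimplementation, not the edward2 source; the negative-factor convention is an assumption):

import tensorflow as tf

def mean_field_logits_sketch(logits, covmat, mean_field_factor=1.0):
    """Scales logits by the GP posterior variance (mean-field rule)."""
    if mean_field_factor < 0:
        return logits  # Assumed convention: a negative factor disables scaling.
    variances = tf.linalg.diag_part(covmat)              # [batch_size]
    scale = tf.sqrt(1. + mean_field_factor * variances)  # [batch_size]
    return logits / tf.expand_dims(scale, axis=-1)       # [batch, num_classes]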
Example #23
def main(argv):
    del argv  # unused arg
    tf.io.gfile.makedirs(FLAGS.output_dir)
    logging.info('Saving checkpoints at %s', FLAGS.output_dir)
    tf.random.set_seed(FLAGS.seed)

    data_dir = FLAGS.data_dir
    if FLAGS.use_gpu:
        logging.info('Use GPU')
        strategy = tf.distribute.MirroredStrategy()
    else:
        logging.info('Use TPU at %s',
                     FLAGS.tpu if FLAGS.tpu is not None else 'local')
        resolver = tf.distribute.cluster_resolver.TPUClusterResolver(
            tpu=FLAGS.tpu)
        tf.config.experimental_connect_to_cluster(resolver)
        tf.tpu.experimental.initialize_tpu_system(resolver)
        strategy = tf.distribute.TPUStrategy(resolver)

    batch_size = FLAGS.per_core_batch_size * FLAGS.num_cores
    test_batch_size = FLAGS.per_core_batch_size * FLAGS.num_cores

    if FLAGS.dataset == 'cifar10':
        dataset_builder_class = ub.datasets.Cifar10Dataset
    else:
        dataset_builder_class = ub.datasets.Cifar100Dataset
    train_builder = dataset_builder_class(data_dir=data_dir,
                                          download_data=FLAGS.download_data,
                                          split=tfds.Split.TRAIN,
                                          use_bfloat16=FLAGS.use_bfloat16,
                                          validation_percent=1. -
                                          FLAGS.train_proportion)
    train_dataset = train_builder.load(batch_size=batch_size)
    train_dataset = strategy.experimental_distribute_dataset(train_dataset)

    validation_dataset = None
    steps_per_validation = 0
    if FLAGS.train_proportion < 1.0:
        validation_builder = dataset_builder_class(
            data_dir=data_dir,
            split=tfds.Split.VALIDATION,
            use_bfloat16=FLAGS.use_bfloat16,
            validation_percent=1. - FLAGS.train_proportion)
        validation_dataset = validation_builder.load(batch_size=batch_size)
        validation_dataset = strategy.experimental_distribute_dataset(
            validation_dataset)
        steps_per_validation = validation_builder.num_examples // batch_size

    clean_test_dataset_builder = dataset_builder_class(
        data_dir=data_dir,
        split=tfds.Split.TEST,
        use_bfloat16=FLAGS.use_bfloat16)
    clean_test_dataset = clean_test_dataset_builder.load(
        batch_size=test_batch_size)
    test_datasets = {
        'clean': strategy.experimental_distribute_dataset(clean_test_dataset),
    }
    steps_per_epoch = train_builder.num_examples // batch_size
    steps_per_eval = clean_test_dataset_builder.num_examples // batch_size
    num_classes = 100 if FLAGS.dataset == 'cifar100' else 10
    if FLAGS.corruptions_interval > 0:
        if FLAGS.dataset == 'cifar10':
            load_c_dataset = utils.load_cifar10_c
        else:
            load_c_dataset = functools.partial(utils.load_cifar100_c,
                                               path=FLAGS.cifar100_c_path)
        corruption_types, max_intensity = utils.load_corrupted_test_info(
            FLAGS.dataset)
        for corruption in corruption_types:
            for intensity in range(1, max_intensity + 1):
                dataset = load_c_dataset(corruption_name=corruption,
                                         corruption_intensity=intensity,
                                         batch_size=test_batch_size,
                                         use_bfloat16=FLAGS.use_bfloat16)
                test_datasets['{0}_{1}'.format(corruption, intensity)] = (
                    strategy.experimental_distribute_dataset(dataset))

    if FLAGS.use_bfloat16:
        policy = tf.keras.mixed_precision.experimental.Policy('mixed_bfloat16')
        tf.keras.mixed_precision.experimental.set_policy(policy)

    summary_writer = tf.summary.create_file_writer(
        os.path.join(FLAGS.output_dir, 'summaries'))

    with strategy.scope():
        logging.info('Building ResNet model')
        model = ub.models.wide_resnet_condconv(
            input_shape=(32, 32, 3),
            depth=28,
            width_multiplier=FLAGS.resnet_width_multiplier,
            num_classes=num_classes,
            num_experts=FLAGS.num_experts,
            per_core_batch_size=FLAGS.per_core_batch_size,
            use_cond_dense=FLAGS.use_cond_dense,
            reduce_dense_outputs=FLAGS.reduce_dense_outputs,
            cond_placement=FLAGS.cond_placement,
            routing_fn=FLAGS.routing_fn,
            normalize_routing=FLAGS.normalize_routing,
            normalize_dense_routing=FLAGS.normalize_dense_routing,
            top_k=FLAGS.top_k,
            routing_pooling=FLAGS.routing_pooling,
            l2=FLAGS.l2)
        # reuse_routing=FLAGS.reuse_routing,
        # shared_routing_type=FLAGS.shared_routing_type)
        logging.info('Model input shape: %s', model.input_shape)
        logging.info('Model output shape: %s', model.output_shape)
        logging.info('Model number of weights: %s', model.count_params())
        # Linearly scale learning rate and the decay epochs by vanilla settings.
        base_lr = FLAGS.base_learning_rate * batch_size / 128
        lr_decay_epochs = [(int(start_epoch_str) * FLAGS.train_epochs) // 200
                           for start_epoch_str in FLAGS.lr_decay_epochs]
        lr_schedule = ub.schedules.WarmUpPiecewiseConstantSchedule(
            steps_per_epoch,
            base_lr,
            decay_ratio=FLAGS.lr_decay_ratio,
            decay_epochs=lr_decay_epochs,
            warmup_epochs=FLAGS.lr_warmup_epochs)
        optimizer = tf.keras.optimizers.SGD(lr_schedule,
                                            momentum=1.0 -
                                            FLAGS.one_minus_momentum,
                                            nesterov=True)
        metrics = {
            'train/negative_log_likelihood':
            tf.keras.metrics.Mean(),
            'train/accuracy':
            tf.keras.metrics.SparseCategoricalAccuracy(),
            'train/loss':
            tf.keras.metrics.Mean(),
            'train/ece':
            rm.metrics.ExpectedCalibrationError(num_bins=FLAGS.num_bins),
            'test/negative_log_likelihood':
            tf.keras.metrics.Mean(),
            'test/accuracy':
            tf.keras.metrics.SparseCategoricalAccuracy(),
            'test/ece':
            rm.metrics.ExpectedCalibrationError(num_bins=FLAGS.num_bins),
        }
        eval_dataset_splits = ['test']
        if validation_dataset:
            metrics.update({
                'validation/negative_log_likelihood':
                tf.keras.metrics.Mean(),
                'validation/accuracy':
                tf.keras.metrics.SparseCategoricalAccuracy(),
                'validation/ece':
                rm.metrics.ExpectedCalibrationError(num_bins=FLAGS.num_bins),
            })
            eval_dataset_splits += ['validation']
        if not FLAGS.reduce_dense_outputs and FLAGS.use_cond_dense:
            for dataset_split in eval_dataset_splits:
                metrics.update({
                    f'{dataset_split}/nll_poe':
                    tf.keras.metrics.Mean(),
                    f'{dataset_split}/nll_moe':
                    tf.keras.metrics.Mean(),
                    f'{dataset_split}/nll_unweighted_poe':
                    tf.keras.metrics.Mean(),
                    f'{dataset_split}/nll_unweighted_moe':
                    tf.keras.metrics.Mean(),
                    f'{dataset_split}/unweighted_gibbs_ce':
                    tf.keras.metrics.Mean(),
                    f'{dataset_split}/ece_unweighted_moe':
                    rm.metrics.ExpectedCalibrationError(
                        num_bins=FLAGS.num_bins),
                    f'{dataset_split}/accuracy_unweighted_moe':
                    tf.keras.metrics.SparseCategoricalAccuracy(),
                    f'{dataset_split}/ece_poe':
                    rm.metrics.ExpectedCalibrationError(
                        num_bins=FLAGS.num_bins),
                    f'{dataset_split}/accuracy_poe':
                    tf.keras.metrics.SparseCategoricalAccuracy(),
                    f'{dataset_split}/ece_unweighted_poe':
                    rm.metrics.ExpectedCalibrationError(
                        num_bins=FLAGS.num_bins),
                    f'{dataset_split}/accuracy_unweighted_poe':
                    tf.keras.metrics.SparseCategoricalAccuracy(),
                })
                for idx in range(FLAGS.num_experts):
                    metrics[f'{dataset_split}/dense_routing_weight_{idx}'] = (
                        tf.keras.metrics.Mean())
                    metrics[
                        f'{dataset_split}/dense_routing_weight_normalized_{idx}'] = (
                            tf.keras.metrics.Mean())

        if FLAGS.corruptions_interval > 0:
            corrupt_metrics = {}
            for intensity in range(1, max_intensity + 1):
                for corruption in corruption_types:
                    dataset_name = '{0}_{1}'.format(corruption, intensity)
                    corrupt_metrics['test/nll_{}'.format(dataset_name)] = (
                        tf.keras.metrics.Mean())
                    corrupt_metrics['test/accuracy_{}'.format(
                        dataset_name)] = (
                            tf.keras.metrics.SparseCategoricalAccuracy())
                    corrupt_metrics['test/ece_{}'.format(dataset_name)] = (
                        rm.metrics.ExpectedCalibrationError(
                            num_bins=FLAGS.num_bins))
                    corrupt_metrics['test/nll_weighted_moe_{}'.format(
                        dataset_name)] = (tf.keras.metrics.Mean())
                    corrupt_metrics['test/accuracy_weighted_moe_{}'.format(
                        dataset_name)] = (
                            tf.keras.metrics.SparseCategoricalAccuracy())
                    corrupt_metrics['test/ece_weighted_moe_{}'.format(
                        dataset_name)] = (rm.metrics.ExpectedCalibrationError(
                            num_bins=FLAGS.num_bins))

        checkpoint = tf.train.Checkpoint(model=model, optimizer=optimizer)
        latest_checkpoint = tf.train.latest_checkpoint(FLAGS.output_dir)
        initial_epoch = 0
        if latest_checkpoint:
            # checkpoint.restore must be within a strategy.scope() so that optimizer
            # slot variables are mirrored.
            checkpoint.restore(latest_checkpoint)
            logging.info('Loaded checkpoint %s', latest_checkpoint)
            initial_epoch = optimizer.iterations.numpy() // steps_per_epoch

    def _process_3d_logits(logits, routing_weights, labels):
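        # Shapes: logits is [batch_size, num_experts, num_classes];
        # routing_weights is [batch_size, num_experts].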
        routing_weights_3d = tf.expand_dims(routing_weights, axis=-1)
        weighted_logits = tf.math.reduce_mean(routing_weights_3d * logits,
                                              axis=1)
        unweighted_logits = tf.math.reduce_mean(logits, axis=1)

        probs = tf.nn.softmax(logits)
        unweighted_probs = tf.math.reduce_mean(probs, axis=1)
        weighted_probs = tf.math.reduce_sum(routing_weights_3d * probs, axis=1)

        labels_broadcasted = tf.tile(tf.reshape(labels, (-1, 1)),
                                     (1, FLAGS.num_experts))
        neg_log_likelihoods = tf.keras.losses.sparse_categorical_crossentropy(
            labels_broadcasted, logits, from_logits=True)
        unweighted_gibbs_ce = tf.math.reduce_mean(neg_log_likelihoods)
        weighted_gibbs_ce = tf.math.reduce_mean(
            tf.math.reduce_sum(routing_weights * neg_log_likelihoods, axis=1))
        return {
            'weighted_logits': weighted_logits,
            'unweighted_logits': unweighted_logits,
            'unweighted_probs': unweighted_probs,
            'weighted_probs': weighted_probs,
            'neg_log_likelihoods': neg_log_likelihoods,
            'unweighted_gibbs_ce': unweighted_gibbs_ce,
            'weighted_gibbs_ce': weighted_gibbs_ce
        }

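    # Loss/prediction variants used below: 'poe' averages logits (a product
    # of experts in probability space) while 'moe' averages probabilities (a
    # mixture of experts); 'weighted' variants use the routing weights,
    # 'unweighted' variants a uniform average, and the Gibbs cross-entropies
    # average per-expert losses instead of combining predictions first.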
    def _process_3d_logits_train(logits, routing_weights, labels):
        processing_results = _process_3d_logits(logits, routing_weights,
                                                labels)
        if FLAGS.loss == 'gibbs_ce':
            probs = processing_results['weighted_probs']
            negative_log_likelihood = processing_results['weighted_gibbs_ce']
        elif FLAGS.loss == 'unweighted_gibbs_ce':
            probs = processing_results['unweighted_probs']
            negative_log_likelihood = processing_results['unweighted_gibbs_ce']
        elif FLAGS.loss == 'moe':
            probs = processing_results['weighted_probs']
            negative_log_likelihood = tf.math.reduce_mean(
                tf.keras.losses.sparse_categorical_crossentropy(
                    labels, probs, from_logits=False))
        elif FLAGS.loss == 'unweighted_moe':
            probs = processing_results['unweighted_probs']
            negative_log_likelihood = tf.math.reduce_mean(
                tf.keras.losses.sparse_categorical_crossentropy(
                    labels, probs, from_logits=False))
        elif FLAGS.loss == 'poe':
            probs = tf.nn.softmax(processing_results['weighted_logits'])
            negative_log_likelihood = tf.math.reduce_mean(
                tf.keras.losses.sparse_categorical_crossentropy(
                    labels,
                    processing_results['weighted_logits'],
                    from_logits=True))
        elif FLAGS.loss == 'unweighted_poe':
            probs = tf.nn.softmax(processing_results['unweighted_logits'])
            negative_log_likelihood = tf.math.reduce_mean(
                tf.keras.losses.sparse_categorical_crossentropy(
                    labels,
                    processing_results['unweighted_logits'],
                    from_logits=True))
        return probs, negative_log_likelihood

    def _process_3d_logits_test(routing_weights, logits, labels):
        processing_results = _process_3d_logits(logits, routing_weights,
                                                labels)
        nll_poe = tf.math.reduce_mean(
            tf.keras.losses.sparse_categorical_crossentropy(
                labels,
                processing_results['weighted_logits'],
                from_logits=True))
        nll_unweighted_poe = tf.math.reduce_mean(
            tf.keras.losses.sparse_categorical_crossentropy(
                labels,
                processing_results['unweighted_logits'],
                from_logits=True))
        nll_moe = tf.math.reduce_mean(
            tf.keras.losses.sparse_categorical_crossentropy(
                labels,
                processing_results['weighted_probs'],
                from_logits=False))
        nll_unweighted_moe = tf.math.reduce_mean(
            tf.keras.losses.sparse_categorical_crossentropy(
                labels,
                processing_results['unweighted_probs'],
                from_logits=False))
        return {
            'nll_poe': nll_poe,
            'nll_moe': nll_moe,
            'nll_unweighted_poe': nll_unweighted_poe,
            'nll_unweighted_moe': nll_unweighted_moe,
            'unweighted_gibbs_ce': processing_results['unweighted_gibbs_ce'],
            'weighted_gibbs_ce': processing_results['weighted_gibbs_ce'],
            'weighted_probs': processing_results['weighted_probs'],
            'unweighted_probs': processing_results['unweighted_probs'],
            'weighted_logits': processing_results['weighted_logits'],
            'unweighted_logits': processing_results['unweighted_logits']
        }

    @tf.function
    def train_step(iterator):
        """Training StepFn."""
        def step_fn(inputs):
            """Per-Replica StepFn."""
            images = inputs['features']
            labels = inputs['labels']
            with tf.GradientTape() as tape:
                logits = model(images, training=True)
                if FLAGS.use_bfloat16:
                    logits = tf.cast(logits, tf.float32)
                # if not FLAGS.reduce_dense_outputs and FLAGS.use_cond_dense:
                if not isinstance(logits, (list, tuple)):
                    raise ValueError('Logits are not a tuple.')
                # logits is a `Tensor` of shape [batch_size, num_experts, num_classes]
                logits, all_routing_weights = logits
                # routing_weights is a `Tensor` of shape [batch_size, num_experts]
                routing_weights = all_routing_weights[-1]
                if not FLAGS.reduce_dense_outputs and FLAGS.use_cond_dense:
                    probs, negative_log_likelihood = _process_3d_logits_train(
                        logits, routing_weights, labels)
                else:
                    probs = tf.nn.softmax(logits)
                    # Prior to reduce_mean the NLLs are of the shape [batch, num_experts].
                    negative_log_likelihood = tf.reduce_mean(
                        tf.keras.losses.sparse_categorical_crossentropy(
                            labels, logits, from_logits=True))

                l2_loss = sum(model.losses)
                loss = negative_log_likelihood + l2_loss
                # Scale the loss given the TPUStrategy will reduce sum all gradients.
                scaled_loss = loss / strategy.num_replicas_in_sync

            grads = tape.gradient(scaled_loss, model.trainable_variables)
            optimizer.apply_gradients(zip(grads, model.trainable_variables))

            metrics['train/ece'].add_batch(probs, label=labels)
            metrics['train/loss'].update_state(loss)
            metrics['train/negative_log_likelihood'].update_state(
                negative_log_likelihood)
            metrics['train/accuracy'].update_state(labels, probs)

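        # The whole epoch runs inside this single tf.function call; tf.range
        # keeps the step loop as a graph while-loop instead of unrolling it
        # steps_per_epoch times at trace time.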
        for _ in tf.range(tf.cast(steps_per_epoch, tf.int32)):
            strategy.run(step_fn, args=(next(iterator), ))

    @tf.function
    def test_step(iterator, dataset_split, dataset_name, num_steps):
        """Evaluation StepFn."""
        def step_fn(inputs):
            """Per-Replica StepFn."""
            images = inputs['features']
            labels = inputs['labels']
            logits = model(images, training=False)
            if FLAGS.use_bfloat16:
                logits = tf.cast(logits, tf.float32)
            if not isinstance(logits, (list, tuple)):
                raise ValueError('Logits not a tuple')
            # logits is a `Tensor` of shape [batch_size, num_experts, num_classes]
            # routing_weights is a `Tensor` of shape [batch_size, num_experts]
            logits, all_routing_weights = logits
            routing_weights = all_routing_weights[-1]
            if not FLAGS.reduce_dense_outputs and FLAGS.use_cond_dense:
                results = _process_3d_logits_test(routing_weights, logits,
                                                  labels)
            else:
                probs = tf.nn.softmax(logits)
                negative_log_likelihood = tf.reduce_mean(
                    tf.keras.losses.sparse_categorical_crossentropy(
                        labels, probs))

            if dataset_name == 'clean':
                if not FLAGS.reduce_dense_outputs and FLAGS.use_cond_dense:
                    metrics[f'{dataset_split}/nll_poe'].update_state(
                        results['nll_poe'])
                    metrics[f'{dataset_split}/nll_moe'].update_state(
                        results['nll_moe'])
                    metrics[
                        f'{dataset_split}/nll_unweighted_poe'].update_state(
                            results['nll_unweighted_poe'])
                    metrics[
                        f'{dataset_split}/nll_unweighted_moe'].update_state(
                            results['nll_unweighted_moe'])
                    metrics[
                        f'{dataset_split}/unweighted_gibbs_ce'].update_state(
                            results['unweighted_gibbs_ce'])
                    metrics[
                        f'{dataset_split}/negative_log_likelihood'].update_state(
                            results['weighted_gibbs_ce'])
                    metrics[f'{dataset_split}/ece'].add_batch(
                        results['weighted_probs'], label=labels)
                    metrics[f'{dataset_split}/accuracy'].update_state(
                        labels, results['weighted_probs'])
                    metrics[f'{dataset_split}/ece_unweighted_moe'].add_batch(
                        results['unweighted_probs'], label=labels)
                    metrics[
                        f'{dataset_split}/accuracy_unweighted_moe'].update_state(
                            labels, results['unweighted_probs'])
                    metrics[f'{dataset_split}/ece_poe'].add_batch(
                        results['weighted_logits'], label=labels)
                    metrics[f'{dataset_split}/accuracy_poe'].update_state(
                        labels, results['weighted_logits'])
                    metrics[f'{dataset_split}/ece_unweighted_poe'].add_batch(
                        results['unweighted_logits'], label=labels)
                    metrics[
                        f'{dataset_split}/accuracy_unweighted_poe'].update_state(
                            labels, results['unweighted_logits'])
                    # TODO(ghassen): summarize all routing weights not only last layer's.
                    average_routing_weights = tf.math.reduce_mean(
                        routing_weights, axis=0)
                    routing_weights_sum = tf.math.reduce_sum(
                        average_routing_weights)
                    for idx in range(FLAGS.num_experts):
                        metrics[
                            f'{dataset_split}/dense_routing_weight_{idx}'].update_state(
                                average_routing_weights[idx])
                        key = f'{dataset_split}/dense_routing_weight_normalized_{idx}'
                        metrics[key].update_state(
                            average_routing_weights[idx] / routing_weights_sum)
                    # TODO(ghassen): add more metrics for expert utilization,
                    # load loss and importance/balance loss.
                else:
                    metrics[
                        f'{dataset_split}/negative_log_likelihood'].update_state(
                            negative_log_likelihood)
                    metrics[f'{dataset_split}/accuracy'].update_state(
                        labels, probs)
                    metrics[f'{dataset_split}/ece'].add_batch(probs,
                                                              label=labels)
            else:
                # TODO(ghassen): figure out how to aggregate probs for the OOD case.
                if not FLAGS.reduce_dense_outputs and FLAGS.use_cond_dense:
                    corrupt_metrics['test/nll_{}'.format(
                        dataset_name)].update_state(
                            results['unweighted_gibbs_ce'])
                    corrupt_metrics['test/accuracy_{}'.format(
                        dataset_name)].update_state(
                            labels, results['unweighted_probs'])
                    corrupt_metrics['test/ece_{}'.format(
                        dataset_name)].add_batch(results['unweighted_probs'],
                                                 label=labels)

                    corrupt_metrics['test/nll_weighted_moe_{}'.format(
                        dataset_name)].update_state(
                            results['weighted_gibbs_ce'])
                    corrupt_metrics['test/accuracy_weighted_moe_{}'.format(
                        dataset_name)].update_state(labels,
                                                    results['weighted_probs'])
                    corrupt_metrics['test/ece_weighted_moe_{}'.format(
                        dataset_name)].add_batch(results['weighted_probs'],
                                                 label=labels)
                else:
                    corrupt_metrics['test/nll_{}'.format(
                        dataset_name)].update_state(negative_log_likelihood)
                    corrupt_metrics['test/accuracy_{}'.format(
                        dataset_name)].update_state(labels, probs)
                    corrupt_metrics['test/ece_{}'.format(
                        dataset_name)].add_batch(probs, label=labels)

        for _ in tf.range(tf.cast(num_steps, tf.int32)):
            strategy.run(step_fn, args=(next(iterator), ))

    metrics.update({'test/ms_per_example': tf.keras.metrics.Mean()})

    train_iterator = iter(train_dataset)
    start_time = time.time()
    for epoch in range(initial_epoch, FLAGS.train_epochs):
        logging.info('Starting to run epoch: %s', epoch)
        train_step(train_iterator)

        current_step = (epoch + 1) * steps_per_epoch
        max_steps = steps_per_epoch * FLAGS.train_epochs
        time_elapsed = time.time() - start_time
        steps_per_sec = float(current_step) / time_elapsed
        eta_seconds = (max_steps - current_step) / steps_per_sec
        message = ('{:.1%} completion: epoch {:d}/{:d}. {:.1f} steps/s. '
                   'ETA: {:.0f} min. Time elapsed: {:.0f} min'.format(
                       current_step / max_steps, epoch + 1, FLAGS.train_epochs,
                       steps_per_sec, eta_seconds / 60, time_elapsed / 60))
        logging.info(message)

        if validation_dataset:
            validation_iterator = iter(validation_dataset)
            test_step(validation_iterator, 'validation', 'clean',
                      steps_per_validation)
        datasets_to_evaluate = {'clean': test_datasets['clean']}
        if (FLAGS.corruptions_interval > 0
                and (epoch + 1) % FLAGS.corruptions_interval == 0):
            datasets_to_evaluate = test_datasets
        for dataset_name, test_dataset in datasets_to_evaluate.items():
            test_iterator = iter(test_dataset)
            logging.info('Testing on dataset %s', dataset_name)
            logging.info('Starting to run eval at epoch: %s', epoch)
            test_start_time = time.time()
            test_step(test_iterator, 'test', dataset_name, steps_per_eval)
            # test_step ran steps_per_eval batches; convert to ms per example.
            ms_per_example = (time.time() - test_start_time) * 1e3 / (
                batch_size * steps_per_eval)
            metrics['test/ms_per_example'].update_state(ms_per_example)

            logging.info('Done with testing on %s', dataset_name)

        corrupt_results = {}
        if (FLAGS.corruptions_interval > 0
                and (epoch + 1) % FLAGS.corruptions_interval == 0):
            corrupt_results = utils.aggregate_corrupt_metrics(
                corrupt_metrics, corruption_types, max_intensity)

        logging.info('Train Loss: %.4f, Accuracy: %.2f%%',
                     metrics['train/loss'].result(),
                     metrics['train/accuracy'].result() * 100)
        logging.info('Test NLL: %.4f, Accuracy: %.2f%%',
                     metrics['test/negative_log_likelihood'].result(),
                     metrics['test/accuracy'].result() * 100)
        total_results = {
            name: metric.result()
            for name, metric in metrics.items()
        }
        total_results.update(corrupt_results)
        # Metrics from Robustness Metrics (like ECE) will return a dict with a
        # single key/value, instead of a scalar.
        total_results = {
            k: (list(v.values())[0] if isinstance(v, dict) else v)
            for k, v in total_results.items()
        }
        with summary_writer.as_default():
            for name, result in total_results.items():
                tf.summary.scalar(name, result, step=epoch + 1)

        for metric in metrics.values():
            metric.reset_states()

        if (FLAGS.checkpoint_interval > 0
                and (epoch + 1) % FLAGS.checkpoint_interval == 0):
            checkpoint_name = checkpoint.save(
                os.path.join(FLAGS.output_dir, 'checkpoint'))
            logging.info('Saved checkpoint to %s', checkpoint_name)
    final_checkpoint_name = checkpoint.save(
        os.path.join(FLAGS.output_dir, 'checkpoint'))
    logging.info('Saved last checkpoint to %s', final_checkpoint_name)
    with summary_writer.as_default():
        hp.hparams({
            'base_learning_rate':
            FLAGS.base_learning_rate,
            'one_minus_momentum':
            FLAGS.one_minus_momentum,
            'l2':
            FLAGS.l2,
            'dropout_rate':
            FLAGS.dropout_rate,
            'num_dropout_samples':
            FLAGS.num_dropout_samples,
            'num_dropout_samples_training':
            FLAGS.num_dropout_samples_training,
        })
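As a standalone illustration of the combination rules implemented by _process_3d_logits above (toy shapes; note the listing uses reduce_mean rather than reduce_sum for the weighted logits, which this sketch mirrors):

import tensorflow as tf

# Toy sizes for illustration only: batch=2, experts=3, classes=4.
logits = tf.random.normal([2, 3, 4])
routing_weights = tf.nn.softmax(tf.random.normal([2, 3]), axis=-1)
w = tf.expand_dims(routing_weights, axis=-1)                    # [2, 3, 1]

per_expert_probs = tf.nn.softmax(logits)                        # softmax over classes
weighted_probs = tf.reduce_sum(w * per_expert_probs, axis=1)    # MoE
unweighted_probs = tf.reduce_mean(per_expert_probs, axis=1)     # uniform MoE
weighted_logits = tf.reduce_mean(w * logits, axis=1)            # PoE (log space)
unweighted_logits = tf.reduce_mean(logits, axis=1)              # uniform PoE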
Example #24
def main(argv):
    del argv  # unused arg
    tf.random.set_seed(FLAGS.seed)

    per_core_batch_size = FLAGS.per_core_batch_size // FLAGS.ensemble_size
    batch_size = per_core_batch_size * FLAGS.num_cores
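    # The rank-1 model is assumed to tile each input ensemble_size times in
    # the forward pass, so the per-core batch is divided up front to keep the
    # effective batch size (and memory use) constant.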
    steps_per_epoch = APPROX_IMAGENET_TRAIN_IMAGES // batch_size
    steps_per_eval = IMAGENET_VALIDATION_IMAGES // batch_size

    logging.info('Saving checkpoints at %s', FLAGS.output_dir)

    if FLAGS.use_gpu:
        logging.info('Use GPU')
        strategy = tf.distribute.MirroredStrategy()
    else:
        logging.info('Use TPU at %s',
                     FLAGS.tpu if FLAGS.tpu is not None else 'local')
        resolver = tf.distribute.cluster_resolver.TPUClusterResolver(
            tpu=FLAGS.tpu)
        tf.config.experimental_connect_to_cluster(resolver)
        tf.tpu.experimental.initialize_tpu_system(resolver)
        strategy = tf.distribute.TPUStrategy(resolver)

    train_builder = ub.datasets.ImageNetDataset(
        split=tfds.Split.TRAIN, use_bfloat16=FLAGS.use_bfloat16)
    train_dataset = train_builder.load(batch_size=batch_size,
                                       strategy=strategy)
    test_builder = ub.datasets.ImageNetDataset(split=tfds.Split.TEST,
                                               use_bfloat16=FLAGS.use_bfloat16)
    clean_test_dataset = test_builder.load(batch_size=batch_size,
                                           strategy=strategy)
    test_datasets = {'clean': clean_test_dataset}
    if FLAGS.corruptions_interval > 0:
        corruption_types, max_intensity = utils.load_corrupted_test_info()
        for name in corruption_types:
            for intensity in range(1, max_intensity + 1):
                dataset_name = '{0}_{1}'.format(name, intensity)
                dataset = utils.load_corrupted_test_dataset(
                    batch_size=batch_size,
                    corruption_name=name,
                    corruption_intensity=intensity,
                    use_bfloat16=FLAGS.use_bfloat16)
                test_datasets[dataset_name] = (
                    strategy.experimental_distribute_dataset(dataset))

    if FLAGS.use_bfloat16:
        policy = tf.keras.mixed_precision.experimental.Policy('mixed_bfloat16')
        tf.keras.mixed_precision.experimental.set_policy(policy)

    summary_writer = tf.summary.create_file_writer(
        os.path.join(FLAGS.output_dir, 'summaries'))

    with strategy.scope():
        logging.info('Building Keras ResNet-50 model')
        model = ub.models.resnet50_het_rank1(
            input_shape=(224, 224, 3),
            num_classes=NUM_CLASSES,
            alpha_initializer=FLAGS.alpha_initializer,
            gamma_initializer=FLAGS.gamma_initializer,
            alpha_regularizer=FLAGS.alpha_regularizer,
            gamma_regularizer=FLAGS.gamma_regularizer,
            use_additive_perturbation=FLAGS.use_additive_perturbation,
            ensemble_size=FLAGS.ensemble_size,
            random_sign_init=FLAGS.random_sign_init,
            dropout_rate=FLAGS.dropout_rate,
            prior_stddev=FLAGS.prior_stddev,
            use_tpu=not FLAGS.use_gpu,
            use_ensemble_bn=FLAGS.use_ensemble_bn,
            num_factors=FLAGS.num_factors,
            temperature=FLAGS.temperature,
            num_mc_samples=FLAGS.num_mc_samples)
        logging.info('Model input shape: %s', model.input_shape)
        logging.info('Model output shape: %s', model.output_shape)
        logging.info('Model number of weights: %s', model.count_params())
        # Linearly scale the learning rate and decay epochs from the vanilla
        # ImageNet settings (batch size 256, 90 epochs).
        base_lr = FLAGS.base_learning_rate * batch_size / 256
        decay_epochs = [
            (FLAGS.train_epochs * 30) // 90,
            (FLAGS.train_epochs * 60) // 90,
            (FLAGS.train_epochs * 80) // 90,
        ]
        learning_rate = ub.schedules.WarmUpPiecewiseConstantSchedule(
            steps_per_epoch=steps_per_epoch,
            base_learning_rate=base_lr,
            decay_ratio=0.1,
            decay_epochs=decay_epochs,
            warmup_epochs=5)
        optimizer = tf.keras.optimizers.SGD(
            learning_rate=learning_rate,
            momentum=1.0 - FLAGS.one_minus_momentum,
            nesterov=True)
        metrics = {
            'train/negative_log_likelihood': tf.keras.metrics.Mean(),
            'train/kl': tf.keras.metrics.Mean(),
            'train/kl_scale': tf.keras.metrics.Mean(),
            'train/elbo': tf.keras.metrics.Mean(),
            'train/loss': tf.keras.metrics.Mean(),
            'train/accuracy': tf.keras.metrics.SparseCategoricalAccuracy(),
            'train/ece': rm.metrics.ExpectedCalibrationError(
                num_bins=FLAGS.num_bins),
            'train/diversity': rm.metrics.AveragePairwiseDiversity(),
            'test/negative_log_likelihood': tf.keras.metrics.Mean(),
            'test/kl': tf.keras.metrics.Mean(),
            'test/elbo': tf.keras.metrics.Mean(),
            'test/accuracy': tf.keras.metrics.SparseCategoricalAccuracy(),
            'test/ece': rm.metrics.ExpectedCalibrationError(
                num_bins=FLAGS.num_bins),
            'test/diversity': rm.metrics.AveragePairwiseDiversity(),
            'test/member_accuracy_mean':
                tf.keras.metrics.SparseCategoricalAccuracy(),
            'test/member_ece_mean': rm.metrics.ExpectedCalibrationError(
                num_bins=FLAGS.num_bins),
        }
        if FLAGS.corruptions_interval > 0:
            corrupt_metrics = {}
            for intensity in range(1, max_intensity + 1):
                for corruption in corruption_types:
                    dataset_name = '{0}_{1}'.format(corruption, intensity)
                    corrupt_metrics['test/nll_{}'.format(dataset_name)] = (
                        tf.keras.metrics.Mean())
                    corrupt_metrics['test/kl_{}'.format(dataset_name)] = (
                        tf.keras.metrics.Mean())
                    corrupt_metrics['test/elbo_{}'.format(dataset_name)] = (
                        tf.keras.metrics.Mean())
                    corrupt_metrics['test/accuracy_{}'.format(
                        dataset_name)] = (
                            tf.keras.metrics.SparseCategoricalAccuracy())
                    corrupt_metrics['test/ece_{}'.format(dataset_name)] = (
                        rm.metrics.ExpectedCalibrationError(
                            num_bins=FLAGS.num_bins))

        if FLAGS.ensemble_size > 1:
            for i in range(FLAGS.ensemble_size):
                metrics['test/nll_member_{}'.format(i)] = (
                    tf.keras.metrics.Mean())
                metrics['test/accuracy_member_{}'.format(i)] = (
                    tf.keras.metrics.SparseCategoricalAccuracy())

        logging.info('Finished building Keras ResNet-50 model')

        checkpoint = tf.train.Checkpoint(model=model, optimizer=optimizer)
        latest_checkpoint = tf.train.latest_checkpoint(FLAGS.output_dir)
        initial_epoch = 0
        if latest_checkpoint:
            # checkpoint.restore must be within a strategy.scope() so that optimizer
            # slot variables are mirrored.
            checkpoint.restore(latest_checkpoint)
            logging.info('Loaded checkpoint %s', latest_checkpoint)
            initial_epoch = optimizer.iterations.numpy() // steps_per_epoch

    def compute_l2_loss(model):
        filtered_variables = []
        for var in model.trainable_variables:
            # Apply l2 to the kernel, batch-norm, and bias terms. This excludes
            # only the fast-weight approximate posterior/prior parameters,
            # which are identified by a fragile naming scheme, so be careful
            # when renaming variables.
            if ('kernel' in var.name or 'batch_norm' in var.name
                    or 'bias' in var.name):
                filtered_variables.append(tf.reshape(var, (-1,)))
        l2_loss = FLAGS.l2 * 2 * tf.nn.l2_loss(
            tf.concat(filtered_variables, axis=0))
        return l2_loss

    @tf.function
    def train_step(iterator):
        """Training StepFn."""
        def step_fn(inputs):
            """Per-Replica StepFn."""
            images = inputs['features']
            labels = inputs['labels']
            if FLAGS.ensemble_size > 1:
                images = tf.tile(images, [FLAGS.ensemble_size, 1, 1, 1])
                labels = tf.tile(labels, [FLAGS.ensemble_size])

            with tf.GradientTape() as tape:
                logits = model(images, training=True)
                if FLAGS.use_bfloat16:
                    logits = tf.cast(logits, tf.float32)

                probs = tf.nn.softmax(logits)
                if FLAGS.ensemble_size > 1:
                    per_probs = tf.reshape(
                        probs,
                        tf.concat([[FLAGS.ensemble_size, -1], probs.shape[1:]],
                                  0))
                    metrics['train/diversity'].add_batch(per_probs)

                negative_log_likelihood = tf.reduce_mean(
                    tf.keras.losses.sparse_categorical_crossentropy(
                        labels, logits, from_logits=True))
                l2_loss = compute_l2_loss(model)
                kl = sum(model.losses) / APPROX_IMAGENET_TRAIN_IMAGES
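                # Linearly anneal the KL weight from 0 to 1 over the first
                # kl_annealing_epochs epochs.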
                kl_scale = tf.cast(optimizer.iterations + 1, kl.dtype)
                kl_scale /= steps_per_epoch * FLAGS.kl_annealing_epochs
                kl_scale = tf.minimum(1., kl_scale)
                kl_loss = kl_scale * kl

                # Scale the loss because TPUStrategy sums (rather than
                # averages) gradients across replicas.
                loss = negative_log_likelihood + l2_loss + kl_loss
                scaled_loss = loss / strategy.num_replicas_in_sync
                elbo = -(negative_log_likelihood + l2_loss + kl)

            grads = tape.gradient(scaled_loss, model.trainable_variables)

            # Separate learning rate implementation.
            if FLAGS.fast_weight_lr_multiplier != 1.0:
                grads_and_vars = []
                for grad, var in zip(grads, model.trainable_variables):
                    # Apply a different learning rate to the fast weights. This
                    # excludes BN and slow weights, which are identified by
                    # name, so be careful with the naming scheme.
                    if ('batch_norm' not in var.name
                            and 'kernel' not in var.name):
                        grads_and_vars.append(
                            (grad * FLAGS.fast_weight_lr_multiplier, var))
                    else:
                        grads_and_vars.append((grad, var))
                optimizer.apply_gradients(grads_and_vars)
            else:
                optimizer.apply_gradients(zip(grads,
                                              model.trainable_variables))

            metrics['train/negative_log_likelihood'].update_state(
                negative_log_likelihood)
            metrics['train/kl'].update_state(kl)
            metrics['train/kl_scale'].update_state(kl_scale)
            metrics['train/elbo'].update_state(elbo)
            metrics['train/loss'].update_state(loss)
            metrics['train/accuracy'].update_state(labels, logits)
            metrics['train/ece'].add_batch(probs, label=labels)

        for _ in tf.range(tf.cast(steps_per_epoch, tf.int32)):
            strategy.run(step_fn, args=(next(iterator), ))

    @tf.function
    def test_step(iterator, dataset_name):
        """Evaluation StepFn."""
        def step_fn(inputs):
            """Per-Replica StepFn."""
            images = inputs['features']
            labels = inputs['labels']
            if FLAGS.ensemble_size > 1:
                images = tf.tile(images, [FLAGS.ensemble_size, 1, 1, 1])
            logits = tf.reshape([
                model(images, training=False)
                for _ in range(FLAGS.num_eval_samples)
            ], [FLAGS.num_eval_samples, FLAGS.ensemble_size, -1, NUM_CLASSES])
            if FLAGS.use_bfloat16:
                logits = tf.cast(logits, tf.float32)
            all_probs = tf.nn.softmax(logits)
            probs = tf.math.reduce_mean(all_probs, axis=[0, 1])  # marginalize

            # Negative log marginal likelihood computed in a numerically-stable way.
            labels_broadcasted = tf.broadcast_to(
                labels,
                [FLAGS.num_eval_samples, FLAGS.ensemble_size, labels.shape[0]])
            log_likelihoods = -tf.keras.losses.sparse_categorical_crossentropy(
                labels_broadcasted, logits, from_logits=True)
            negative_log_likelihood = tf.reduce_mean(
                -tf.reduce_logsumexp(log_likelihoods, axis=[0, 1]) +
                tf.math.log(float(FLAGS.num_eval_samples *
                                  FLAGS.ensemble_size)))

            l2_loss = compute_l2_loss(model)
            kl = sum(model.losses) / IMAGENET_VALIDATION_IMAGES
            elbo = -(negative_log_likelihood + l2_loss + kl)

            if dataset_name == 'clean':
                if FLAGS.ensemble_size > 1:
                    per_probs = tf.reduce_mean(all_probs,
                                               axis=0)  # marginalize samples
                    metrics['test/diversity'].add_batch(per_probs)
                    for i in range(FLAGS.ensemble_size):
                        member_probs = per_probs[i]
                        member_loss = tf.keras.losses.sparse_categorical_crossentropy(
                            labels, member_probs)
                        metrics['test/nll_member_{}'.format(i)].update_state(
                            member_loss)
                        metrics['test/accuracy_member_{}'.format(
                            i)].update_state(labels, member_probs)
                        metrics['test/member_accuracy_mean'].update_state(
                            labels, member_probs)
                        metrics['test/member_ece_mean'].add_batch(member_probs,
                                                                  label=labels)

                metrics['test/negative_log_likelihood'].update_state(
                    negative_log_likelihood)
                metrics['test/kl'].update_state(kl)
                metrics['test/elbo'].update_state(elbo)
                metrics['test/accuracy'].update_state(labels, probs)
                metrics['test/ece'].add_batch(probs, label=labels)
            else:
                corrupt_metrics['test/nll_{}'.format(
                    dataset_name)].update_state(negative_log_likelihood)
                corrupt_metrics['test/kl_{}'.format(
                    dataset_name)].update_state(kl)
                corrupt_metrics['test/elbo_{}'.format(
                    dataset_name)].update_state(elbo)
                corrupt_metrics['test/accuracy_{}'.format(
                    dataset_name)].update_state(labels, probs)
                corrupt_metrics['test/ece_{}'.format(dataset_name)].add_batch(
                    probs, label=labels)

        for _ in tf.range(tf.cast(steps_per_eval, tf.int32)):
            strategy.run(step_fn, args=(next(iterator), ))

    train_iterator = iter(train_dataset)
    start_time = time.time()

    for epoch in range(initial_epoch, FLAGS.train_epochs):
        logging.info('Starting to run epoch: %s', epoch)
        train_step(train_iterator)

        current_step = (epoch + 1) * steps_per_epoch
        max_steps = steps_per_epoch * FLAGS.train_epochs
        time_elapsed = time.time() - start_time
        steps_per_sec = float(current_step) / time_elapsed
        eta_seconds = (max_steps - current_step) / steps_per_sec
        message = ('{:.1%} completion: epoch {:d}/{:d}. {:.1f} steps/s. '
                   'ETA: {:.0f} min. Time elapsed: {:.0f} min'.format(
                       current_step / max_steps, epoch + 1, FLAGS.train_epochs,
                       steps_per_sec, eta_seconds / 60, time_elapsed / 60))
        logging.info(message)

        datasets_to_evaluate = {'clean': test_datasets['clean']}
        if (FLAGS.corruptions_interval > 0
                and (epoch + 1) % FLAGS.corruptions_interval == 0):
            datasets_to_evaluate = test_datasets
        for dataset_name, test_dataset in datasets_to_evaluate.items():
            logging.info('Testing on dataset %s', dataset_name)
            test_iterator = iter(test_dataset)
            logging.info('Starting to run eval at epoch: %s', epoch)
            test_step(test_iterator, dataset_name)
            logging.info('Done with testing on %s', dataset_name)

        corrupt_results = {}
        if (FLAGS.corruptions_interval > 0
                and (epoch + 1) % FLAGS.corruptions_interval == 0):
            corrupt_results = utils.aggregate_corrupt_metrics(
                corrupt_metrics, corruption_types, max_intensity,
                FLAGS.alexnet_errors_path)

        logging.info('Train Loss: %.4f, Accuracy: %.2f%%',
                     metrics['train/loss'].result(),
                     metrics['train/accuracy'].result() * 100)
        logging.info('Test NLL: %.4f, Accuracy: %.2f%%',
                     metrics['test/negative_log_likelihood'].result(),
                     metrics['test/accuracy'].result() * 100)

        # The per-member metrics only exist when ensemble_size > 1.
        if FLAGS.ensemble_size > 1:
            for i in range(FLAGS.ensemble_size):
                logging.info(
                    'Member %d Test Loss: %.4f, Accuracy: %.2f%%', i,
                    metrics['test/nll_member_{}'.format(i)].result(),
                    metrics['test/accuracy_member_{}'.format(i)].result() *
                    100)

        total_results = {
            name: metric.result()
            for name, metric in metrics.items()
        }
        total_results.update(corrupt_results)
        # Robustness Metrics results are dicts, so flatten them into scalars.
        total_results = utils.flatten_dictionary(total_results)
        with summary_writer.as_default():
            for name, result in total_results.items():
                tf.summary.scalar(name, result, step=epoch + 1)

        for metric in metrics.values():
            metric.reset_states()

        if (FLAGS.checkpoint_interval > 0
                and (epoch + 1) % FLAGS.checkpoint_interval == 0):
            checkpoint_name = checkpoint.save(
                os.path.join(FLAGS.output_dir, 'checkpoint'))
            logging.info('Saved checkpoint to %s', checkpoint_name)

    final_checkpoint_name = checkpoint.save(
        os.path.join(FLAGS.output_dir, 'checkpoint'))
    logging.info('Saved last checkpoint to %s', final_checkpoint_name)
    with summary_writer.as_default():
        hp.hparams({
            'base_learning_rate': FLAGS.base_learning_rate,
            'one_minus_momentum': FLAGS.one_minus_momentum,
            'l2': FLAGS.l2,
            'fast_weight_lr_multiplier': FLAGS.fast_weight_lr_multiplier,
            'num_eval_samples': FLAGS.num_eval_samples,
        })
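
# The test_step in the example above marginalizes over num_eval_samples Monte
# Carlo samples and ensemble_size members, and computes the log marginal
# likelihood with a logsumexp for numerical stability. A self-contained sketch
# of that reduction (shapes and values below are illustrative only):
import tensorflow as tf

num_samples, batch, num_classes = 4, 2, 10
sample_logits = tf.random.normal([num_samples, batch, num_classes])
labels = tf.constant([1, 3])

labels_broadcasted = tf.broadcast_to(labels, [num_samples, batch])
log_likelihoods = -tf.keras.losses.sparse_categorical_crossentropy(
    labels_broadcasted, sample_logits, from_logits=True)
# log p(y|x) = logsumexp_s log p(y|x, s) - log S; average the NLL over the
# batch.
negative_log_likelihood = tf.reduce_mean(
    -tf.reduce_logsumexp(log_likelihoods, axis=0) +
    tf.math.log(float(num_samples)))
print(float(negative_log_likelihood))
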
Example #25
def main(argv):
  del argv  # unused arg
  if not FLAGS.use_gpu:
    raise ValueError('Only GPU is currently supported.')
  if FLAGS.num_cores > 1:
    raise ValueError('Only a single accelerator is currently supported.')
  tf.random.set_seed(FLAGS.seed)
  tf.io.gfile.makedirs(FLAGS.output_dir)

  ds_info = tfds.builder(FLAGS.dataset).info
  batch_size = FLAGS.per_core_batch_size * FLAGS.num_cores
  steps_per_eval = ds_info.splits['test'].num_examples // batch_size
  num_classes = ds_info.features['label'].num_classes

  dataset_input_fn = utils.load_input_fn(
      split=tfds.Split.TEST,
      name=FLAGS.dataset,
      batch_size=FLAGS.per_core_batch_size,
      use_bfloat16=FLAGS.use_bfloat16)
  test_datasets = {'clean': dataset_input_fn()}
  corruption_types, max_intensity = utils.load_corrupted_test_info(
      FLAGS.dataset)
  for name in corruption_types:
    for intensity in range(1, max_intensity + 1):
      dataset_name = '{0}_{1}'.format(name, intensity)
      if FLAGS.dataset == 'cifar10':
        load_c_dataset = utils.load_cifar10_c_input_fn
      else:
        load_c_dataset = functools.partial(
            utils.load_cifar100_c_input_fn, path=FLAGS.cifar100_c_path)
      corrupted_input_fn = load_c_dataset(
          corruption_name=name,
          corruption_intensity=intensity,
          batch_size=FLAGS.per_core_batch_size,
          use_bfloat16=FLAGS.use_bfloat16)
      test_datasets[dataset_name] = corrupted_input_fn()

  model = ub.models.wide_resnet(
      input_shape=ds_info.features['image'].shape,
      depth=28,
      width_multiplier=10,
      num_classes=num_classes,
      l2=0.,
      version=2)
  logging.info('Model input shape: %s', model.input_shape)
  logging.info('Model output shape: %s', model.output_shape)
  logging.info('Model number of weights: %s', model.count_params())

  # Search for checkpoints from their index file; then remove the index suffix.
  ensemble_filenames = tf.io.gfile.glob(os.path.join(FLAGS.checkpoint_dir,
                                                     '**/*.index'))
  ensemble_filenames = [filename[:-6] for filename in ensemble_filenames]
  ensemble_size = len(ensemble_filenames)
  logging.info('Ensemble size: %s', ensemble_size)
  logging.info('Ensemble number of weights: %s',
               ensemble_size * model.count_params())
  logging.info('Ensemble filenames: %s', str(ensemble_filenames))
  checkpoint = tf.train.Checkpoint(model=model)

  # Write model predictions to files.
  num_datasets = len(test_datasets)
  for m, ensemble_filename in enumerate(ensemble_filenames):
    checkpoint.restore(ensemble_filename)
    for n, (name, test_dataset) in enumerate(test_datasets.items()):
      filename = '{dataset}_{member}.npy'.format(dataset=name, member=m)
      filename = os.path.join(FLAGS.output_dir, filename)
      if not tf.io.gfile.exists(filename):
        logits = []
        test_iterator = iter(test_dataset)
        for _ in range(steps_per_eval):
          features, _ = next(test_iterator)  # pytype: disable=attribute-error
          logits.append(model(features, training=False))

        logits = tf.concat(logits, axis=0)
        with tf.io.gfile.GFile(filename, 'w') as f:
          np.save(f, logits.numpy())
      percent = (m * num_datasets + (n + 1)) / (ensemble_size * num_datasets)
      message = ('{:.1%} completion for prediction: ensemble member {:d}/{:d}. '
                 'Dataset {:d}/{:d}'.format(percent,
                                            m + 1,
                                            ensemble_size,
                                            n + 1,
                                            num_datasets))
      logging.info(message)

  metrics = {
      'test/negative_log_likelihood': tf.keras.metrics.Mean(),
      'test/gibbs_cross_entropy': tf.keras.metrics.Mean(),
      'test/accuracy': tf.keras.metrics.SparseCategoricalAccuracy(),
      'test/ece': um.ExpectedCalibrationError(num_bins=FLAGS.num_bins),
  }
  corrupt_metrics = {}
  for name in test_datasets:
    corrupt_metrics['test/nll_{}'.format(name)] = tf.keras.metrics.Mean()
    corrupt_metrics['test/accuracy_{}'.format(name)] = (
        tf.keras.metrics.SparseCategoricalAccuracy())
    corrupt_metrics['test/ece_{}'.format(name)] = (
        um.ExpectedCalibrationError(num_bins=FLAGS.num_bins))
  for i in range(ensemble_size):
    metrics['test/nll_member_{}'.format(i)] = tf.keras.metrics.Mean()
    metrics['test/accuracy_member_{}'.format(i)] = (
        tf.keras.metrics.SparseCategoricalAccuracy())
  test_diversity = {
      'test/disagreement': tf.keras.metrics.Mean(),
      'test/average_kl': tf.keras.metrics.Mean(),
      'test/cosine_similarity': tf.keras.metrics.Mean(),
  }
  metrics.update(test_diversity)

  # Evaluate model predictions.
  for n, (name, test_dataset) in enumerate(test_datasets.items()):
    logits_dataset = []
    for m in range(ensemble_size):
      filename = '{dataset}_{member}.npy'.format(dataset=name, member=m)
      filename = os.path.join(FLAGS.output_dir, filename)
      with tf.io.gfile.GFile(filename, 'rb') as f:
        logits_dataset.append(np.load(f))

    logits_dataset = tf.convert_to_tensor(logits_dataset)
    test_iterator = iter(test_dataset)
    for step in range(steps_per_eval):
      _, labels = next(test_iterator)  # pytype: disable=attribute-error
      logits = logits_dataset[:, (step*batch_size):((step+1)*batch_size)]
      labels = tf.cast(labels, tf.int32)
      negative_log_likelihood = um.ensemble_cross_entropy(labels, logits)
      per_probs = tf.nn.softmax(logits)
      probs = tf.reduce_mean(per_probs, axis=0)
      if name == 'clean':
        gibbs_ce = um.gibbs_cross_entropy(labels, logits)
        metrics['test/negative_log_likelihood'].update_state(
            negative_log_likelihood)
        metrics['test/gibbs_cross_entropy'].update_state(gibbs_ce)
        metrics['test/accuracy'].update_state(labels, probs)
        metrics['test/ece'].update_state(labels, probs)

        for i in range(ensemble_size):
          member_probs = per_probs[i]
          member_loss = tf.keras.losses.sparse_categorical_crossentropy(
              labels, member_probs)
          metrics['test/nll_member_{}'.format(i)].update_state(member_loss)
          metrics['test/accuracy_member_{}'.format(i)].update_state(
              labels, member_probs)
        diversity_results = um.average_pairwise_diversity(
            per_probs, ensemble_size)
        for k, v in diversity_results.items():
          test_diversity['test/' + k].update_state(v)
      else:
        corrupt_metrics['test/nll_{}'.format(name)].update_state(
            negative_log_likelihood)
        corrupt_metrics['test/accuracy_{}'.format(name)].update_state(
            labels, probs)
        corrupt_metrics['test/ece_{}'.format(name)].update_state(
            labels, probs)

    message = ('{:.1%} completion for evaluation: dataset {:d}/{:d}'.format(
        (n + 1) / num_datasets, n + 1, num_datasets))
    logging.info(message)

  corrupt_results = utils.aggregate_corrupt_metrics(corrupt_metrics,
                                                    corruption_types,
                                                    max_intensity)
  total_results = {name: metric.result() for name, metric in metrics.items()}
  total_results.update(corrupt_results)
  logging.info('Metrics: %s', total_results)
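
# Example #25 reports two ensemble losses: the ensemble cross-entropy (NLL of
# the averaged predictive distribution, um.ensemble_cross_entropy) and the
# Gibbs cross-entropy (average per-member NLL, um.gibbs_cross_entropy). A
# minimal sketch of both quantities, assuming logits of shape
# [ensemble_size, batch, num_classes]:
import tensorflow as tf

ensemble_size = 3
member_logits = tf.random.normal([ensemble_size, 2, 10])
labels = tf.constant([1, 3])

labels_b = tf.broadcast_to(labels, [ensemble_size, 2])
member_ll = -tf.keras.losses.sparse_categorical_crossentropy(
    labels_b, member_logits, from_logits=True)  # [ensemble_size, batch]
# Ensemble CE: -log mean_m p_m(y|x), via a logsumexp over members.
ensemble_ce = tf.reduce_mean(
    -tf.reduce_logsumexp(member_ll, axis=0) +
    tf.math.log(float(ensemble_size)))
# Gibbs CE: -mean_m log p_m(y|x); by Jensen's inequality it upper-bounds the
# ensemble cross-entropy.
gibbs_ce = tf.reduce_mean(-member_ll)
print(float(ensemble_ce), float(gibbs_ce))
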
Example #26
def main(argv):
    del argv  # unused arg
    tf.io.gfile.makedirs(FLAGS.output_dir)
    logging.info('Saving checkpoints at %s', FLAGS.output_dir)
    tf.random.set_seed(FLAGS.seed)

    if FLAGS.use_gpu:
        logging.info('Use GPU')
        strategy = tf.distribute.MirroredStrategy()
    else:
        logging.info('Use TPU at %s',
                     FLAGS.tpu if FLAGS.tpu is not None else 'local')
        resolver = tf.distribute.cluster_resolver.TPUClusterResolver(
            tpu=FLAGS.tpu)
        tf.config.experimental_connect_to_cluster(resolver)
        tf.tpu.experimental.initialize_tpu_system(resolver)
        strategy = tf.distribute.TPUStrategy(resolver)

    ds_info = tfds.builder(FLAGS.dataset).info
    batch_size = (FLAGS.per_core_batch_size * FLAGS.num_cores //
                  FLAGS.num_dropout_samples_training)
    test_batch_size = FLAGS.per_core_batch_size * FLAGS.num_cores
    steps_per_epoch = ds_info.splits['train'].num_examples // batch_size
    steps_per_eval = ds_info.splits['test'].num_examples // test_batch_size
    num_classes = ds_info.features['label'].num_classes

    train_dataset = utils.load_dataset(split=tfds.Split.TRAIN,
                                       name=FLAGS.dataset,
                                       batch_size=batch_size,
                                       use_bfloat16=FLAGS.use_bfloat16)
    clean_test_dataset = utils.load_dataset(split=tfds.Split.TEST,
                                            name=FLAGS.dataset,
                                            batch_size=test_batch_size,
                                            use_bfloat16=FLAGS.use_bfloat16)
    train_dataset = strategy.experimental_distribute_dataset(train_dataset)
    test_datasets = {
        'clean': strategy.experimental_distribute_dataset(clean_test_dataset),
    }
    if FLAGS.corruptions_interval > 0:
        if FLAGS.dataset == 'cifar10':
            load_c_dataset = utils.load_cifar10_c
        else:
            load_c_dataset = functools.partial(utils.load_cifar100_c,
                                               path=FLAGS.cifar100_c_path)
        corruption_types, max_intensity = utils.load_corrupted_test_info(
            FLAGS.dataset)
        for corruption in corruption_types:
            for intensity in range(1, max_intensity + 1):
                dataset = load_c_dataset(corruption_name=corruption,
                                         corruption_intensity=intensity,
                                         batch_size=test_batch_size,
                                         use_bfloat16=FLAGS.use_bfloat16)
                test_datasets['{0}_{1}'.format(corruption, intensity)] = (
                    strategy.experimental_distribute_dataset(dataset))

    if FLAGS.use_bfloat16:
        policy = tf.keras.mixed_precision.experimental.Policy('mixed_bfloat16')
        tf.keras.mixed_precision.experimental.set_policy(policy)

    summary_writer = tf.summary.create_file_writer(
        os.path.join(FLAGS.output_dir, 'summaries'))

    with strategy.scope():
        logging.info('Building ResNet model')
        model = ub.models.wide_resnet_dropout(
            input_shape=ds_info.features['image'].shape,
            depth=28,
            width_multiplier=10,
            num_classes=num_classes,
            l2=FLAGS.l2,
            dropout_rate=FLAGS.dropout_rate,
            residual_dropout=FLAGS.residual_dropout,
            filterwise_dropout=FLAGS.filterwise_dropout)
        logging.info('Model input shape: %s', model.input_shape)
        logging.info('Model output shape: %s', model.output_shape)
        logging.info('Model number of weights: %s', model.count_params())
        # Linearly scale learning rate and the decay epochs by vanilla settings.
        base_lr = FLAGS.base_learning_rate * batch_size / 128
        lr_decay_epochs = [(int(start_epoch_str) * FLAGS.train_epochs) // 200
                           for start_epoch_str in FLAGS.lr_decay_epochs]
        lr_schedule = utils.LearningRateSchedule(
            steps_per_epoch,
            base_lr,
            decay_ratio=FLAGS.lr_decay_ratio,
            decay_epochs=lr_decay_epochs,
            warmup_epochs=FLAGS.lr_warmup_epochs)
        optimizer = tf.keras.optimizers.SGD(lr_schedule,
                                            momentum=0.9,
                                            nesterov=True)
        metrics = {
            'train/negative_log_likelihood': tf.keras.metrics.Mean(),
            'train/accuracy': tf.keras.metrics.SparseCategoricalAccuracy(),
            'train/loss': tf.keras.metrics.Mean(),
            'train/ece': um.ExpectedCalibrationError(num_bins=FLAGS.num_bins),
            'test/negative_log_likelihood': tf.keras.metrics.Mean(),
            'test/accuracy': tf.keras.metrics.SparseCategoricalAccuracy(),
            'test/ece': um.ExpectedCalibrationError(num_bins=FLAGS.num_bins),
        }
        if FLAGS.corruptions_interval > 0:
            corrupt_metrics = {}
            for intensity in range(1, max_intensity + 1):
                for corruption in corruption_types:
                    dataset_name = '{0}_{1}'.format(corruption, intensity)
                    corrupt_metrics['test/nll_{}'.format(dataset_name)] = (
                        tf.keras.metrics.Mean())
                    corrupt_metrics['test/accuracy_{}'.format(
                        dataset_name)] = (
                            tf.keras.metrics.SparseCategoricalAccuracy())
                    corrupt_metrics['test/ece_{}'.format(dataset_name)] = (
                        um.ExpectedCalibrationError(num_bins=FLAGS.num_bins))

        checkpoint = tf.train.Checkpoint(model=model, optimizer=optimizer)
        latest_checkpoint = tf.train.latest_checkpoint(FLAGS.output_dir)
        initial_epoch = 0
        if latest_checkpoint:
            # checkpoint.restore must be within a strategy.scope() so that optimizer
            # slot variables are mirrored.
            checkpoint.restore(latest_checkpoint)
            logging.info('Loaded checkpoint %s', latest_checkpoint)
            initial_epoch = optimizer.iterations.numpy() // steps_per_epoch

    @tf.function
    def train_step(iterator):
        """Training StepFn."""
        def step_fn(inputs):
            """Per-Replica StepFn."""
            images, labels = inputs
            images = tf.tile(images,
                             [FLAGS.num_dropout_samples_training, 1, 1, 1])
            labels = tf.tile(labels, [FLAGS.num_dropout_samples_training])
            with tf.GradientTape() as tape:
                logits = model(images, training=True)
                if FLAGS.use_bfloat16:
                    logits = tf.cast(logits, tf.float32)
                negative_log_likelihood = tf.reduce_mean(
                    tf.keras.losses.sparse_categorical_crossentropy(
                        labels, logits, from_logits=True))
                l2_loss = sum(model.losses)
                loss = negative_log_likelihood + l2_loss
                # Scale the loss because TPUStrategy sums (rather than
                # averages) gradients across replicas.
                scaled_loss = loss / strategy.num_replicas_in_sync

            grads = tape.gradient(scaled_loss, model.trainable_variables)
            optimizer.apply_gradients(zip(grads, model.trainable_variables))

            probs = tf.nn.softmax(logits)
            metrics['train/ece'].update_state(labels, probs)
            metrics['train/loss'].update_state(loss)
            metrics['train/negative_log_likelihood'].update_state(
                negative_log_likelihood)
            metrics['train/accuracy'].update_state(labels, logits)

        strategy.run(step_fn, args=(next(iterator), ))

    @tf.function
    def test_step(iterator, dataset_name):
        """Evaluation StepFn."""
        def step_fn(inputs):
            """Per-Replica StepFn."""
            images, labels = inputs

            logits_list = []
            for _ in range(FLAGS.num_dropout_samples):
                logits = model(images, training=False)
                if FLAGS.use_bfloat16:
                    logits = tf.cast(logits, tf.float32)
                logits_list.append(logits)

            # Logits dimension is (num_samples, batch_size, num_classes).
            logits_list = tf.stack(logits_list, axis=0)
            probs_list = tf.nn.softmax(logits_list)
            probs = tf.reduce_mean(probs_list, axis=0)

            labels_broadcasted = tf.broadcast_to(
                labels, [FLAGS.num_dropout_samples, labels.shape[0]])
            log_likelihoods = -tf.keras.losses.sparse_categorical_crossentropy(
                labels_broadcasted, logits_list, from_logits=True)
            negative_log_likelihood = tf.reduce_mean(
                -tf.reduce_logsumexp(log_likelihoods, axis=[0]) +
                tf.math.log(float(FLAGS.num_dropout_samples)))

            if dataset_name == 'clean':
                metrics['test/negative_log_likelihood'].update_state(
                    negative_log_likelihood)
                metrics['test/accuracy'].update_state(labels, probs)
                metrics['test/ece'].update_state(labels, probs)
            else:
                corrupt_metrics['test/nll_{}'.format(
                    dataset_name)].update_state(negative_log_likelihood)
                corrupt_metrics['test/accuracy_{}'.format(
                    dataset_name)].update_state(labels, probs)
                corrupt_metrics['test/ece_{}'.format(
                    dataset_name)].update_state(labels, probs)

        strategy.run(step_fn, args=(next(iterator), ))

    metrics.update({'test/ms_per_example': tf.keras.metrics.Mean()})

    train_iterator = iter(train_dataset)
    start_time = time.time()
    for epoch in range(initial_epoch, FLAGS.train_epochs):
        logging.info('Starting to run epoch: %s', epoch)
        for step in range(steps_per_epoch):
            train_step(train_iterator)

            current_step = epoch * steps_per_epoch + (step + 1)
            max_steps = steps_per_epoch * FLAGS.train_epochs
            time_elapsed = time.time() - start_time
            steps_per_sec = float(current_step) / time_elapsed
            eta_seconds = (max_steps - current_step) / steps_per_sec
            message = ('{:.1%} completion: epoch {:d}/{:d}. {:.1f} steps/s. '
                       'ETA: {:.0f} min. Time elapsed: {:.0f} min'.format(
                           current_step / max_steps, epoch + 1,
                           FLAGS.train_epochs, steps_per_sec, eta_seconds / 60,
                           time_elapsed / 60))
            if step % 20 == 0:
                logging.info(message)

        datasets_to_evaluate = {'clean': test_datasets['clean']}
        if (FLAGS.corruptions_interval > 0
                and (epoch + 1) % FLAGS.corruptions_interval == 0):
            datasets_to_evaluate = test_datasets
        for dataset_name, test_dataset in datasets_to_evaluate.items():
            test_iterator = iter(test_dataset)
            logging.info('Testing on dataset %s', dataset_name)
            for step in range(steps_per_eval):
                if step % 20 == 0:
                    logging.info('Starting to run eval step %s of epoch: %s',
                                 step, epoch)
                test_start_time = time.time()
                test_step(test_iterator, dataset_name)
                # Convert seconds to milliseconds per test example.
                ms_per_example = ((time.time() - test_start_time) * 1e3 /
                                  test_batch_size)
                metrics['test/ms_per_example'].update_state(ms_per_example)

            logging.info('Done with testing on %s', dataset_name)

        corrupt_results = {}
        if (FLAGS.corruptions_interval > 0
                and (epoch + 1) % FLAGS.corruptions_interval == 0):
            corrupt_results = utils.aggregate_corrupt_metrics(
                corrupt_metrics, corruption_types, max_intensity)

        logging.info('Train Loss: %.4f, Accuracy: %.2f%%',
                     metrics['train/loss'].result(),
                     metrics['train/accuracy'].result() * 100)
        logging.info('Test NLL: %.4f, Accuracy: %.2f%%',
                     metrics['test/negative_log_likelihood'].result(),
                     metrics['test/accuracy'].result() * 100)
        total_results = {
            name: metric.result()
            for name, metric in metrics.items()
        }
        total_results.update(corrupt_results)
        with summary_writer.as_default():
            for name, result in total_results.items():
                tf.summary.scalar(name, result, step=epoch + 1)

        for metric in metrics.values():
            metric.reset_states()

        if (FLAGS.checkpoint_interval > 0
                and (epoch + 1) % FLAGS.checkpoint_interval == 0):
            checkpoint_name = checkpoint.save(
                os.path.join(FLAGS.output_dir, 'checkpoint'))
            logging.info('Saved checkpoint to %s', checkpoint_name)
    final_checkpoint_name = checkpoint.save(
        os.path.join(FLAGS.output_dir, 'checkpoint'))
    logging.info('Saved last checkpoint to %s', final_checkpoint_name)
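
# Example #26 draws num_dropout_samples stochastic forward passes and averages
# the softmax outputs. The example's wide_resnet_dropout model is assumed to
# keep dropout active at inference; with a plain Keras model the same Monte
# Carlo dropout average needs training=True, as in this generic sketch:
import tensorflow as tf


def mc_dropout_probs(model, images, num_samples):
    """Average softmax probabilities over num_samples stochastic passes."""
    probs_list = []
    for _ in range(num_samples):
        # training=True keeps dropout masks active, so each pass differs.
        logits = model(images, training=True)
        probs_list.append(tf.nn.softmax(logits))
    # (num_samples, batch, classes) -> predictive mean over samples.
    return tf.reduce_mean(tf.stack(probs_list, axis=0), axis=0)
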
def main(argv):
    del argv  # unused arg
    if not FLAGS.use_gpu:
        raise ValueError('Only GPU is currently supported.')
    if FLAGS.num_cores > 1:
        raise ValueError('Only a single accelerator is currently supported.')
    tf.random.set_seed(FLAGS.seed)
    tf.io.gfile.makedirs(FLAGS.output_dir)

    ds_info = tfds.builder(FLAGS.dataset).info
    batch_size = FLAGS.total_batch_size
    steps_per_eval = ds_info.splits['test'].num_examples // batch_size
    num_classes = ds_info.features['label'].num_classes

    data_dir = FLAGS.data_dir
    dataset_builder = ub.datasets.get(
        FLAGS.dataset,
        data_dir=data_dir,
        download_data=FLAGS.download_data,
        split=tfds.Split.TEST,
        drop_remainder=FLAGS.drop_remainder_for_eval)
    dataset = dataset_builder.load(batch_size=batch_size)
    test_datasets = {'clean': dataset}
    if FLAGS.eval_on_ood:
        ood_dataset_names = FLAGS.ood_dataset
        ood_datasets, steps_per_ood = ood_utils.load_ood_datasets(
            ood_dataset_names,
            dataset_builder,
            1. - FLAGS.train_proportion,
            batch_size,
            drop_remainder=FLAGS.drop_remainder_for_eval)
        test_datasets.update(ood_datasets)
    extra_kwargs = {}
    if FLAGS.dataset == 'cifar100':
        data_dir = FLAGS.cifar100_c_path
    corruption_types, _ = utils.load_corrupted_test_info(FLAGS.dataset)
    for corruption_type in corruption_types:
        for severity in range(1, 6):
            dataset = ub.datasets.get(
                f'{FLAGS.dataset}_corrupted',
                corruption_type=corruption_type,
                data_dir=data_dir,
                severity=severity,
                split=tfds.Split.TEST,
                drop_remainder=FLAGS.drop_remainder_for_eval,
                **extra_kwargs).load(batch_size=batch_size)
            test_datasets[f'{corruption_type}_{severity}'] = dataset

    model = ub.models.wide_resnet_sngp(
        input_shape=ds_info.features['image'].shape,
        batch_size=FLAGS.total_batch_size // FLAGS.num_cores,
        depth=28,
        width_multiplier=10,
        num_classes=num_classes,
        l2=0.,
        use_mc_dropout=FLAGS.use_mc_dropout,
        use_filterwise_dropout=FLAGS.use_filterwise_dropout,
        dropout_rate=FLAGS.dropout_rate,
        use_gp_layer=FLAGS.use_gp_layer,
        gp_input_dim=FLAGS.gp_input_dim,
        gp_hidden_dim=FLAGS.gp_hidden_dim,
        gp_scale=FLAGS.gp_scale,
        gp_bias=FLAGS.gp_bias,
        gp_input_normalization=FLAGS.gp_input_normalization,
        gp_random_feature_type=FLAGS.gp_random_feature_type,
        gp_cov_discount_factor=FLAGS.gp_cov_discount_factor,
        gp_cov_ridge_penalty=FLAGS.gp_cov_ridge_penalty,
        use_spec_norm=FLAGS.use_spec_norm,
        spec_norm_iteration=FLAGS.spec_norm_iteration,
        spec_norm_bound=FLAGS.spec_norm_bound)
    logging.info('Model input shape: %s', model.input_shape)
    logging.info('Model output shape: %s', model.output_shape)
    logging.info('Model number of weights: %s', model.count_params())

    # Search for checkpoints from their index file; then remove the index suffix.
    ensemble_filenames = tf.io.gfile.glob(
        os.path.join(FLAGS.checkpoint_dir, '**/*.index'))
    # Only ensemble checkpoints that share the same model architecture.
    ensemble_filenames0 = [
        filename for filename in ensemble_filenames
        if f'use_gp_layer:{FLAGS.use_gp_layer}' in filename
        and f'use_spec_norm:{FLAGS.use_spec_norm}' in filename
    ]
    np.random.seed(FLAGS.seed)
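    # Sample ensemble_size checkpoints uniformly (with replacement) from the
    # architecture-matched pool.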
    ensemble_filenames = np.random.choice(ensemble_filenames0,
                                          FLAGS.ensemble_size,
                                          replace=True)

    ensemble_filenames = [filename[:-6] for filename in ensemble_filenames]
    ensemble_size = len(ensemble_filenames)
    logging.info('Ensemble size: %s', ensemble_size)
    logging.info('Ensemble filenames: %s', str(ensemble_filenames))
    logging.info('Ensemble number of weights: %s',
                 ensemble_size * model.count_params())
    checkpoint = tf.train.Checkpoint(model=model)

    # Write model predictions to files.
    num_datasets = len(test_datasets)
    for m, ensemble_filename in enumerate(ensemble_filenames):
        checkpoint.restore(ensemble_filename)
        for n, (name, test_dataset) in enumerate(test_datasets.items()):
            filename = '{dataset}_{member}.npy'.format(
                dataset=name.replace('/', '_'),
                member=m)  # ood dataset name has '/'
            filename = os.path.join(FLAGS.output_dir, filename)
            if not tf.io.gfile.exists(filename):
                logits = []
                test_iterator = iter(test_dataset)
                steps = (steps_per_eval if 'ood/' not in name
                         else steps_per_ood[name])
                for _ in range(steps):
                    features = next(test_iterator)['features']  # pytype: disable=unsupported-operands
                    logits_member = model(features, training=False)
                    if isinstance(logits_member, (list, tuple)):
                        # If model returns a tuple of (logits, covmat), extract both
                        logits_member, covmat_member = logits_member
                        logits_member = ed.layers.utils.mean_field_logits(
                            logits_member, covmat_member,
                            FLAGS.gp_mean_field_factor_ensemble)
                    logits.append(logits_member)

                logits = tf.concat(logits, axis=0)
                with tf.io.gfile.GFile(filename, 'w') as f:
                    np.save(f, logits.numpy())
            percent = (m * num_datasets +
                       (n + 1)) / (ensemble_size * num_datasets)
            message = (
                '{:.1%} completion for prediction: ensemble member {:d}/{:d}. '
                'Dataset {:d}/{:d}'.format(percent, m + 1, ensemble_size,
                                           n + 1, num_datasets))
            logging.info(message)

    metrics = {
        'test/negative_log_likelihood': tf.keras.metrics.Mean(),
        'test/gibbs_cross_entropy': tf.keras.metrics.Mean(),
        'test/accuracy': tf.keras.metrics.SparseCategoricalAccuracy(),
        'test/ece': rm.metrics.ExpectedCalibrationError(
            num_bins=FLAGS.num_bins),
    }
    if FLAGS.eval_on_ood:
        ood_metrics = ood_utils.create_ood_metrics(ood_dataset_names)
        metrics.update(ood_metrics)
    corrupt_metrics = {}
    for name in test_datasets:
        corrupt_metrics['test/nll_{}'.format(name)] = tf.keras.metrics.Mean()
        corrupt_metrics['test/accuracy_{}'.format(name)] = (
            tf.keras.metrics.SparseCategoricalAccuracy())
        corrupt_metrics['test/ece_{}'.format(name)] = (
            rm.metrics.ExpectedCalibrationError(num_bins=FLAGS.num_bins))

    # Evaluate model predictions.
    for n, (name, test_dataset) in enumerate(test_datasets.items()):
        logits_dataset = []
        for m in range(ensemble_size):
            filename = '{dataset}_{member}.npy'.format(
                dataset=name.replace('/', '_'),
                member=m)  # ood dataset name has '/'
            filename = os.path.join(FLAGS.output_dir, filename)
            with tf.io.gfile.GFile(filename, 'rb') as f:
                logits_dataset.append(np.load(f))

        logits_dataset = tf.convert_to_tensor(logits_dataset)
        test_iterator = iter(test_dataset)
        steps = steps_per_eval if 'ood/' not in name else steps_per_ood[name]
        for step in range(steps):
            inputs = next(test_iterator)
            labels = inputs['labels']  # pytype: disable=unsupported-operands
            logits = logits_dataset[:, (step * batch_size):((step + 1) *
                                                            batch_size)]
            labels = tf.cast(labels, tf.int32)
            negative_log_likelihood_metric = rm.metrics.EnsembleCrossEntropy()
            negative_log_likelihood_metric.add_batch(logits, labels=labels)
            negative_log_likelihood = list(
                negative_log_likelihood_metric.result().values())[0]
            per_probs = tf.nn.softmax(logits)
            probs = tf.reduce_mean(per_probs, axis=0)
            logits_mean = tf.reduce_mean(logits, axis=0)
            if name == 'clean':
                gibbs_ce_metric = rm.metrics.GibbsCrossEntropy()
                gibbs_ce_metric.add_batch(logits, labels=labels)
                gibbs_ce = list(gibbs_ce_metric.result().values())[0]
                metrics['test/negative_log_likelihood'].update_state(
                    negative_log_likelihood)
                metrics['test/gibbs_cross_entropy'].update_state(gibbs_ce)
                metrics['test/accuracy'].update_state(labels, probs)
                metrics['test/ece'].add_batch(probs, label=labels)
            elif name.startswith('ood/'):
                ood_labels = 1 - inputs['is_in_distribution']  # pytype: disable=unsupported-operands
                if FLAGS.dempster_shafer_ood:
                    ood_scores = ood_utils.DempsterShaferUncertainty(
                        logits_mean)
                else:
                    ood_scores = 1 - tf.reduce_max(probs, axis=-1)

                for metric_name, metric in metrics.items():
                    if name in metric_name:
                        metric.update_state(ood_labels, ood_scores)
            else:
                corrupt_metrics['test/nll_{}'.format(name)].update_state(
                    negative_log_likelihood)
                corrupt_metrics['test/accuracy_{}'.format(name)].update_state(
                    labels, probs)
                corrupt_metrics['test/ece_{}'.format(name)].add_batch(
                    probs, label=labels)

        message = (
            '{:.1%} completion for evaluation: dataset {:d}/{:d}'.format(
                (n + 1) / num_datasets, n + 1, num_datasets))
        logging.info(message)

    corrupt_results = utils.aggregate_corrupt_metrics(corrupt_metrics,
                                                      corruption_types)
    total_results = {name: metric.result() for name, metric in metrics.items()}
    total_results.update(corrupt_results)
    # Metrics from Robustness Metrics (like ECE) will return a dict with a
    # single key/value, instead of a scalar.
    total_results = {
        k: (list(v.values())[0] if isinstance(v, dict) else v)
        for k, v in total_results.items()
    }
    logging.info('Metrics: %s', total_results)
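
# When FLAGS.dempster_shafer_ood is set, the script above scores OOD inputs
# with ood_utils.DempsterShaferUncertainty applied to the mean logits;
# otherwise it falls back to one minus the maximum predictive probability.
# A sketch of the Dempster-Shafer style score, assuming the common
# K / (K + sum_k exp(logit_k)) form (the actual ood_utils implementation may
# differ):
import tensorflow as tf


def dempster_shafer_uncertainty_sketch(logits):
    """Uncertainty grows as the total evidence sum_k exp(logit_k) shrinks."""
    num_classes = tf.cast(tf.shape(logits)[-1], logits.dtype)
    belief_mass = tf.reduce_sum(tf.exp(logits), axis=-1)
    return num_classes / (belief_mass + num_classes)


example_logits = tf.random.normal([2, 10])
print(dempster_shafer_uncertainty_sketch(example_logits).numpy())
print((1.0 - tf.reduce_max(tf.nn.softmax(example_logits), axis=-1)).numpy())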