Example 1
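Trains a rank-1 Bayesian Wide ResNet 28-10 (ub.models.wide_resnet_rank1) on CIFAR-10/100 under a GPU or TPU distribution strategy, using an ELBO-style loss (NLL + L2 + linearly annealed KL) and evaluating on both clean and corrupted test sets.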
def main(argv):
    del argv  # unused arg
    tf.io.gfile.makedirs(FLAGS.output_dir)
    logging.info('Saving checkpoints at %s', FLAGS.output_dir)
    tf.random.set_seed(FLAGS.seed)

    data_dir = utils.get_data_dir_from_flags(FLAGS)
    if FLAGS.use_gpu:
        logging.info('Use GPU')
        strategy = tf.distribute.MirroredStrategy()
    else:
        logging.info('Use TPU at %s',
                     FLAGS.tpu if FLAGS.tpu is not None else 'local')
        resolver = tf.distribute.cluster_resolver.TPUClusterResolver(
            tpu=FLAGS.tpu)
        tf.config.experimental_connect_to_cluster(resolver)
        tf.tpu.experimental.initialize_tpu_system(resolver)
        strategy = tf.distribute.TPUStrategy(resolver)

    ds_info = tfds.builder(FLAGS.dataset).info
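    # Each core's batch is split across the ensemble members; the images are
    # tiled back up by ensemble_size inside the train and test step functions.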
    batch_size = ((FLAGS.per_core_batch_size // FLAGS.ensemble_size) *
                  FLAGS.num_cores)
    train_dataset_size = ds_info.splits['train'].num_examples
    steps_per_epoch = train_dataset_size // batch_size
    test_dataset_size = ds_info.splits['test'].num_examples
    steps_per_eval = test_dataset_size // batch_size
    num_classes = ds_info.features['label'].num_classes

    train_builder = ub.datasets.get(FLAGS.dataset,
                                    data_dir=data_dir,
                                    download_data=FLAGS.download_data,
                                    split=tfds.Split.TRAIN,
                                    validation_percent=1. -
                                    FLAGS.train_proportion)
    train_dataset = train_builder.load(batch_size=batch_size)
    validation_dataset = None
    steps_per_validation = 0
    if FLAGS.train_proportion < 1.0:
        validation_builder = ub.datasets.get(FLAGS.dataset,
                                             data_dir=data_dir,
                                             download_data=FLAGS.download_data,
                                             split=tfds.Split.VALIDATION,
                                             validation_percent=1. -
                                             FLAGS.train_proportion)
        validation_dataset = validation_builder.load(batch_size=batch_size)
        validation_dataset = strategy.experimental_distribute_dataset(
            validation_dataset)
        steps_per_validation = validation_builder.num_examples // batch_size
    clean_test_builder = ub.datasets.get(FLAGS.dataset,
                                         data_dir=data_dir,
                                         download_data=FLAGS.download_data,
                                         split=tfds.Split.TEST)
    clean_test_dataset = clean_test_builder.load(batch_size=batch_size)
    train_dataset = strategy.experimental_distribute_dataset(train_dataset)
    test_datasets = {
        'clean': strategy.experimental_distribute_dataset(clean_test_dataset),
    }
    steps_per_epoch = train_builder.num_examples // batch_size
    steps_per_eval = clean_test_builder.num_examples // batch_size
    num_classes = 100 if FLAGS.dataset == 'cifar100' else 10
    if FLAGS.corruptions_interval > 0:
        if FLAGS.dataset == 'cifar100':
            data_dir = FLAGS.cifar100_c_path
        corruption_types, _ = utils.load_corrupted_test_info(FLAGS.dataset)
        for corruption_type in corruption_types:
            for severity in range(1, 6):
                dataset = ub.datasets.get(
                    f'{FLAGS.dataset}_corrupted',
                    corruption_type=corruption_type,
                    data_dir=data_dir,
                    severity=severity,
                    split=tfds.Split.TEST).load(batch_size=batch_size)
                test_datasets[f'{corruption_type}_{severity}'] = (
                    strategy.experimental_distribute_dataset(dataset))

    summary_writer = tf.summary.create_file_writer(
        os.path.join(FLAGS.output_dir, 'summaries'))

    with strategy.scope():
        logging.info('Building Keras model')
        model = ub.models.wide_resnet_rank1(
            input_shape=(32, 32, 3),
            depth=28,
            width_multiplier=10,
            num_classes=num_classes,
            alpha_initializer=FLAGS.alpha_initializer,
            gamma_initializer=FLAGS.gamma_initializer,
            alpha_regularizer=FLAGS.alpha_regularizer,
            gamma_regularizer=FLAGS.gamma_regularizer,
            use_additive_perturbation=FLAGS.use_additive_perturbation,
            ensemble_size=FLAGS.ensemble_size,
            random_sign_init=FLAGS.random_sign_init,
            dropout_rate=FLAGS.dropout_rate,
            prior_mean=FLAGS.prior_mean,
            prior_stddev=FLAGS.prior_stddev)
        logging.info('Model input shape: %s', model.input_shape)
        logging.info('Model output shape: %s', model.output_shape)
        logging.info('Model number of weights: %s', model.count_params())
        # Linearly scale the learning rate and the decay epochs from the vanilla settings.
        base_lr = FLAGS.base_learning_rate * batch_size / 128
        lr_decay_epochs = [(int(start_epoch_str) * FLAGS.train_epochs) // 200
                           for start_epoch_str in FLAGS.lr_decay_epochs]
        lr_schedule = ub.schedules.WarmUpPiecewiseConstantSchedule(
            steps_per_epoch,
            base_lr,
            decay_ratio=FLAGS.lr_decay_ratio,
            decay_epochs=lr_decay_epochs,
            warmup_epochs=FLAGS.lr_warmup_epochs)
        optimizer = tf.keras.optimizers.SGD(lr_schedule,
                                            momentum=1.0 -
                                            FLAGS.one_minus_momentum,
                                            nesterov=True)
        metrics = {
            'train/negative_log_likelihood':
            tf.keras.metrics.Mean(),
            'train/kl':
            tf.keras.metrics.Mean(),
            'train/kl_scale':
            tf.keras.metrics.Mean(),
            'train/elbo':
            tf.keras.metrics.Mean(),
            'train/loss':
            tf.keras.metrics.Mean(),
            'train/accuracy':
            tf.keras.metrics.SparseCategoricalAccuracy(),
            'train/ece':
            rm.metrics.ExpectedCalibrationError(num_bins=FLAGS.num_bins),
            'test/negative_log_likelihood':
            tf.keras.metrics.Mean(),
            'test/kl':
            tf.keras.metrics.Mean(),
            'test/elbo':
            tf.keras.metrics.Mean(),
            'test/accuracy':
            tf.keras.metrics.SparseCategoricalAccuracy(),
            'test/ece':
            rm.metrics.ExpectedCalibrationError(num_bins=FLAGS.num_bins),
        }
        eval_dataset_splits = ['test']
        if validation_dataset:
            metrics.update({
                'validation/negative_log_likelihood':
                tf.keras.metrics.Mean(),
                'validation/kl':
                tf.keras.metrics.Mean(),
                'validation/elbo':
                tf.keras.metrics.Mean(),
                'validation/accuracy':
                tf.keras.metrics.SparseCategoricalAccuracy(),
                'validation/ece':
                rm.metrics.ExpectedCalibrationError(num_bins=FLAGS.num_bins),
            })
            eval_dataset_splits += ['validation']
        for i in range(FLAGS.ensemble_size):
            for dataset_split in eval_dataset_splits:
                metrics[
                    f'{dataset_split}/nll_member_{i}'] = tf.keras.metrics.Mean(
                    )
                metrics[f'{dataset_split}/accuracy_member_{i}'] = (
                    tf.keras.metrics.SparseCategoricalAccuracy())
        if FLAGS.corruptions_interval > 0:
            corrupt_metrics = {}
            for intensity in range(1, 6):
                for corruption in corruption_types:
                    dataset_name = '{0}_{1}'.format(corruption, intensity)
                    corrupt_metrics['test/nll_{}'.format(dataset_name)] = (
                        tf.keras.metrics.Mean())
                    corrupt_metrics['test/kl_{}'.format(dataset_name)] = (
                        tf.keras.metrics.Mean())
                    corrupt_metrics['test/elbo_{}'.format(dataset_name)] = (
                        tf.keras.metrics.Mean())
                    corrupt_metrics['test/accuracy_{}'.format(
                        dataset_name)] = (
                            tf.keras.metrics.SparseCategoricalAccuracy())
                    corrupt_metrics['test/ece_{}'.format(dataset_name)] = (
                        rm.metrics.ExpectedCalibrationError(
                            num_bins=FLAGS.num_bins))

        checkpoint = tf.train.Checkpoint(model=model, optimizer=optimizer)
        latest_checkpoint = tf.train.latest_checkpoint(FLAGS.output_dir)
        initial_epoch = 0
        if latest_checkpoint:
            # checkpoint.restore must be within a strategy.scope() so that optimizer
            # slot variables are mirrored.
            checkpoint.restore(latest_checkpoint)
            logging.info('Loaded checkpoint %s', latest_checkpoint)
            initial_epoch = optimizer.iterations.numpy() // steps_per_epoch

    def compute_l2_loss(model):
        filtered_variables = []
        for var in model.trainable_variables:
            # Apply L2 to the kernel, batch norm, and bias terms. This excludes
            # only the fast-weight approximate posterior/prior parameters, so pay
            # attention to their naming scheme.
            if ('kernel' in var.name or 'batch_norm' in var.name
                    or 'bias' in var.name):
                filtered_variables.append(tf.reshape(var, (-1, )))
        l2_loss = FLAGS.l2 * 2 * tf.nn.l2_loss(
            tf.concat(filtered_variables, axis=0))
        return l2_loss

    @tf.function
    def train_step(iterator):
        """Training StepFn."""
        def step_fn(inputs):
            """Per-Replica StepFn."""
            images = inputs['features']
            labels = inputs['labels']
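            # Tile the batch so every rank-1 ensemble member sees the same examples;
            # the per-member perturbations make their predictions differ.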
            if FLAGS.ensemble_size > 1:
                images = tf.tile(images, [FLAGS.ensemble_size, 1, 1, 1])
                labels = tf.tile(labels, [FLAGS.ensemble_size])

            with tf.GradientTape() as tape:
                logits = model(images, training=True)
                negative_log_likelihood = tf.reduce_mean(
                    tf.keras.losses.sparse_categorical_crossentropy(
                        labels, logits, from_logits=True))
                l2_loss = compute_l2_loss(model)
                kl = sum(model.losses) / train_dataset_size
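                # Anneal the KL weight linearly from 0 to 1 over kl_annealing_epochs.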
                kl_scale = tf.cast(optimizer.iterations + 1, kl.dtype)
                kl_scale /= steps_per_epoch * FLAGS.kl_annealing_epochs
                kl_scale = tf.minimum(1., kl_scale)
                kl_loss = kl_scale * kl

                # Scale the loss, since TPUStrategy will sum-reduce gradients across replicas.
                loss = negative_log_likelihood + l2_loss + kl_loss
                scaled_loss = loss / strategy.num_replicas_in_sync
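                # The reported ELBO uses the full, un-annealed KL term.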
                elbo = -(negative_log_likelihood + l2_loss + kl)

            grads = tape.gradient(scaled_loss, model.trainable_variables)

            # Apply a separate learning rate to the fast weights.
            if FLAGS.fast_weight_lr_multiplier != 1.0:
                grads_and_vars = []
                for grad, var in zip(grads, model.trainable_variables):
                    # Apply a different learning rate to the fast-weight approximate
                    # posterior/prior parameters. This excludes BN and slow weights,
                    # so pay attention to the naming scheme.
                    if ('kernel' not in var.name
                            and 'batch_norm' not in var.name
                            and 'bias' not in var.name):
                        grads_and_vars.append(
                            (grad * FLAGS.fast_weight_lr_multiplier, var))
                    else:
                        grads_and_vars.append((grad, var))
                optimizer.apply_gradients(grads_and_vars)
            else:
                optimizer.apply_gradients(zip(grads,
                                              model.trainable_variables))

            probs = tf.nn.softmax(logits)
            metrics['train/negative_log_likelihood'].update_state(
                negative_log_likelihood)
            metrics['train/kl'].update_state(kl)
            metrics['train/kl_scale'].update_state(kl_scale)
            metrics['train/elbo'].update_state(elbo)
            metrics['train/loss'].update_state(loss)
            metrics['train/accuracy'].update_state(labels, probs)
            metrics['train/ece'].add_batch(probs, label=labels)

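        # Run one full epoch inside this tf.function; tf.range keeps the loop in-graph.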
        for _ in tf.range(tf.cast(steps_per_epoch, tf.int32)):
            strategy.run(step_fn, args=(next(iterator), ))

    @tf.function
    def test_step(iterator, dataset_split, dataset_name, num_steps):
        """Evaluation StepFn."""
        def step_fn(inputs):
            """Per-Replica StepFn."""
            images = inputs['features']
            labels = inputs['labels']
            if FLAGS.ensemble_size > 1:
                images = tf.tile(images, [FLAGS.ensemble_size, 1, 1, 1])
            logits = tf.reshape([
                model(images, training=False)
                for _ in range(FLAGS.num_eval_samples)
            ], [FLAGS.num_eval_samples, FLAGS.ensemble_size, -1, num_classes])
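            # logits shape: (num_eval_samples, ensemble_size, per-replica batch size, num_classes).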
            probs = tf.nn.softmax(logits)

            if FLAGS.ensemble_size > 1:
                per_probs = tf.reduce_mean(probs,
                                           axis=0)  # marginalize samples
                for i in range(FLAGS.ensemble_size):
                    member_probs = per_probs[i]
                    member_loss = tf.keras.losses.sparse_categorical_crossentropy(
                        labels, member_probs)
                    metrics[f'{dataset_split}/nll_member_{i}'].update_state(
                        member_loss)
                    metrics[
                        f'{dataset_split}/accuracy_member_{i}'].update_state(
                            labels, member_probs)

            # Negative log marginal likelihood computed in a numerically-stable way.
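            # i.e. -log((1 / (S * K)) * sum_{s,k} p(y | x, s, k)), with S eval samples
            # and K ensemble members, computed with logsumexp.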
            labels_broadcasted = tf.broadcast_to(
                labels,
                [FLAGS.num_eval_samples, FLAGS.ensemble_size, labels.shape[0]])
            log_likelihoods = -tf.keras.losses.sparse_categorical_crossentropy(
                labels_broadcasted, logits, from_logits=True)
            negative_log_likelihood = tf.reduce_mean(
                -tf.reduce_logsumexp(log_likelihoods, axis=[0, 1]) +
                tf.math.log(float(FLAGS.num_eval_samples *
                                  FLAGS.ensemble_size)))
            probs = tf.math.reduce_mean(probs, axis=[0, 1])  # marginalize

            l2_loss = compute_l2_loss(model)
            kl = sum(model.losses) / test_dataset_size
            elbo = -(negative_log_likelihood + l2_loss + kl)

            if dataset_name == 'clean':
                metrics[
                    f'{dataset_split}/negative_log_likelihood'].update_state(
                        negative_log_likelihood)
                metrics[f'{dataset_split}/kl'].update_state(kl)
                metrics[f'{dataset_split}/elbo'].update_state(elbo)
                metrics[f'{dataset_split}/accuracy'].update_state(
                    labels, probs)
                metrics[f'{dataset_split}/ece'].add_batch(probs, label=labels)
            else:
                corrupt_metrics['test/nll_{}'.format(
                    dataset_name)].update_state(negative_log_likelihood)
                corrupt_metrics['test/kl_{}'.format(
                    dataset_name)].update_state(kl)
                corrupt_metrics['test/elbo_{}'.format(
                    dataset_name)].update_state(elbo)
                corrupt_metrics['test/accuracy_{}'.format(
                    dataset_name)].update_state(labels, probs)
                corrupt_metrics['test/ece_{}'.format(dataset_name)].add_batch(
                    probs, label=labels)

        for _ in tf.range(tf.cast(num_steps, tf.int32)):
            strategy.run(step_fn, args=(next(iterator), ))

    train_iterator = iter(train_dataset)
    start_time = time.time()
    for epoch in range(initial_epoch, FLAGS.train_epochs):
        logging.info('Starting to run epoch: %s', epoch)
        train_step(train_iterator)
        current_step = (epoch + 1) * steps_per_epoch
        max_steps = steps_per_epoch * FLAGS.train_epochs
        time_elapsed = time.time() - start_time
        steps_per_sec = float(current_step) / time_elapsed
        eta_seconds = (max_steps - current_step) / steps_per_sec
        message = ('{:.1%} completion: epoch {:d}/{:d}. {:.1f} steps/s. '
                   'ETA: {:.0f} min. Time elapsed: {:.0f} min'.format(
                       current_step / max_steps, epoch + 1, FLAGS.train_epochs,
                       steps_per_sec, eta_seconds / 60, time_elapsed / 60))
        logging.info(message)

        if validation_dataset:
            validation_iterator = iter(validation_dataset)
            test_step(validation_iterator, 'validation', 'clean',
                      steps_per_validation)
        datasets_to_evaluate = {'clean': test_datasets['clean']}
        if (FLAGS.corruptions_interval > 0
                and (epoch + 1) % FLAGS.corruptions_interval == 0):
            datasets_to_evaluate = test_datasets
        for dataset_name, test_dataset in datasets_to_evaluate.items():
            test_iterator = iter(test_dataset)
            logging.info('Testing on dataset %s', dataset_name)
            logging.info('Starting to run eval at epoch: %s', epoch)
            test_step(test_iterator, 'test', dataset_name, steps_per_eval)
            logging.info('Done with testing on %s', dataset_name)

        corrupt_results = {}
        if (FLAGS.corruptions_interval > 0
                and (epoch + 1) % FLAGS.corruptions_interval == 0):
            corrupt_results = utils.aggregate_corrupt_metrics(
                corrupt_metrics, corruption_types)

        logging.info('Train Loss: %.4f, Accuracy: %.2f%%',
                     metrics['train/loss'].result(),
                     metrics['train/accuracy'].result() * 100)
        logging.info('Test NLL: %.4f, Accuracy: %.2f%%',
                     metrics['test/negative_log_likelihood'].result(),
                     metrics['test/accuracy'].result() * 100)
        if FLAGS.ensemble_size > 1:
            for i in range(FLAGS.ensemble_size):
                logging.info(
                    'Member %d Test Loss: %.4f, Accuracy: %.2f%%', i,
                    metrics['test/nll_member_{}'.format(i)].result(),
                    metrics['test/accuracy_member_{}'.format(i)].result() *
                    100)
        total_results = {
            name: metric.result()
            for name, metric in metrics.items()
        }
        total_results.update(corrupt_results)
        # Metrics from Robustness Metrics (like ECE) will return a dict with a
        # single key/value, instead of a scalar.
        total_results = {
            k: (list(v.values())[0] if isinstance(v, dict) else v)
            for k, v in total_results.items()
        }
        with summary_writer.as_default():
            for name, result in total_results.items():
                tf.summary.scalar(name, result, step=epoch + 1)

        for metric in metrics.values():
            metric.reset_states()

        if (FLAGS.checkpoint_interval > 0
                and (epoch + 1) % FLAGS.checkpoint_interval == 0):
            checkpoint_name = checkpoint.save(
                os.path.join(FLAGS.output_dir, 'checkpoint'))
            logging.info('Saved checkpoint to %s', checkpoint_name)

    final_checkpoint_name = checkpoint.save(
        os.path.join(FLAGS.output_dir, 'checkpoint'))
    logging.info('Saved last checkpoint to %s', final_checkpoint_name)
    with summary_writer.as_default():
        hp.hparams({
            'base_learning_rate': FLAGS.base_learning_rate,
            'one_minus_momentum': FLAGS.one_minus_momentum,
            'l2': FLAGS.l2,
            'fast_weight_lr_multiplier': FLAGS.fast_weight_lr_multiplier,
            'num_eval_samples': FLAGS.num_eval_samples,
        })
Example 2
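Trains a Monte Carlo dropout Wide ResNet 28-10 (ub.models.wide_resnet_dropout) on CIFAR-10/100: training tiles each batch num_dropout_samples_training times, and evaluation averages softmax probabilities over num_dropout_samples stochastic forward passes.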
def main(argv):
    del argv  # unused arg
    tf.io.gfile.makedirs(FLAGS.output_dir)
    logging.info('Saving checkpoints at %s', FLAGS.output_dir)
    tf.random.set_seed(FLAGS.seed)

    data_dir = utils.get_data_dir_from_flags(FLAGS)
    if FLAGS.use_gpu:
        logging.info('Use GPU')
        strategy = tf.distribute.MirroredStrategy()
    else:
        logging.info('Use TPU at %s',
                     FLAGS.tpu if FLAGS.tpu is not None else 'local')
        resolver = tf.distribute.cluster_resolver.TPUClusterResolver(
            tpu=FLAGS.tpu)
        tf.config.experimental_connect_to_cluster(resolver)
        tf.tpu.experimental.initialize_tpu_system(resolver)
        strategy = tf.distribute.TPUStrategy(resolver)

    ds_info = tfds.builder(FLAGS.dataset).info
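    # Shrink the train batch so that tiling each example num_dropout_samples_training
    # times in the train step keeps the per-core batch at roughly per_core_batch_size.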
    batch_size = (FLAGS.per_core_batch_size * FLAGS.num_cores //
                  FLAGS.num_dropout_samples_training)
    test_batch_size = FLAGS.per_core_batch_size * FLAGS.num_cores
    steps_per_epoch = ds_info.splits['train'].num_examples // batch_size
    steps_per_eval = ds_info.splits['test'].num_examples // test_batch_size
    num_classes = ds_info.features['label'].num_classes

    train_builder = ub.datasets.get(FLAGS.dataset,
                                    data_dir=data_dir,
                                    download_data=FLAGS.download_data,
                                    split=tfds.Split.TRAIN,
                                    validation_percent=1. -
                                    FLAGS.train_proportion)
    train_dataset = train_builder.load(batch_size=batch_size)
    validation_dataset = None
    steps_per_validation = 0
    if FLAGS.train_proportion < 1.0:
        validation_builder = ub.datasets.get(FLAGS.dataset,
                                             data_dir=data_dir,
                                             split=tfds.Split.VALIDATION,
                                             validation_percent=1. -
                                             FLAGS.train_proportion)
        validation_dataset = validation_builder.load(
            batch_size=test_batch_size)
        validation_dataset = strategy.experimental_distribute_dataset(
            validation_dataset)
        steps_per_validation = validation_builder.num_examples // test_batch_size
    clean_test_builder = ub.datasets.get(FLAGS.dataset,
                                         data_dir=data_dir,
                                         split=tfds.Split.TEST)
    clean_test_dataset = clean_test_builder.load(batch_size=test_batch_size)
    train_dataset = strategy.experimental_distribute_dataset(train_dataset)
    test_datasets = {
        'clean': strategy.experimental_distribute_dataset(clean_test_dataset),
    }
    steps_per_epoch = train_builder.num_examples // batch_size
    steps_per_eval = clean_test_builder.num_examples // batch_size
    num_classes = 100 if FLAGS.dataset == 'cifar100' else 10
    if FLAGS.corruptions_interval > 0:
        if FLAGS.dataset == 'cifar100':
            data_dir = FLAGS.cifar100_c_path
        corruption_types, _ = utils.load_corrupted_test_info(FLAGS.dataset)
        for corruption_type in corruption_types:
            for severity in range(1, 6):
                dataset = ub.datasets.get(
                    f'{FLAGS.dataset}_corrupted',
                    corruption_type=corruption_type,
                    data_dir=data_dir,
                    severity=severity,
                    split=tfds.Split.TEST).load(batch_size=test_batch_size)
                test_datasets[f'{corruption_type}_{severity}'] = (
                    strategy.experimental_distribute_dataset(dataset))

    summary_writer = tf.summary.create_file_writer(
        os.path.join(FLAGS.output_dir, 'summaries'))

    with strategy.scope():
        logging.info('Building ResNet model')
        model = ub.models.wide_resnet_dropout(
            input_shape=(32, 32, 3),
            depth=28,
            width_multiplier=10,
            num_classes=num_classes,
            l2=FLAGS.l2,
            dropout_rate=FLAGS.dropout_rate,
            residual_dropout=FLAGS.residual_dropout,
            filterwise_dropout=FLAGS.filterwise_dropout)
        logging.info('Model input shape: %s', model.input_shape)
        logging.info('Model output shape: %s', model.output_shape)
        logging.info('Model number of weights: %s', model.count_params())
        # Linearly scale the learning rate and the decay epochs from the vanilla settings.
        base_lr = FLAGS.base_learning_rate * batch_size / 128
        lr_decay_epochs = [(int(start_epoch_str) * FLAGS.train_epochs) // 200
                           for start_epoch_str in FLAGS.lr_decay_epochs]
        lr_schedule = ub.schedules.WarmUpPiecewiseConstantSchedule(
            steps_per_epoch,
            base_lr,
            decay_ratio=FLAGS.lr_decay_ratio,
            decay_epochs=lr_decay_epochs,
            warmup_epochs=FLAGS.lr_warmup_epochs)
        optimizer = tf.keras.optimizers.SGD(lr_schedule,
                                            momentum=1.0 -
                                            FLAGS.one_minus_momentum,
                                            nesterov=True)
        metrics = {
            'train/negative_log_likelihood':
            tf.keras.metrics.Mean(),
            'train/accuracy':
            tf.keras.metrics.SparseCategoricalAccuracy(),
            'train/loss':
            tf.keras.metrics.Mean(),
            'train/ece':
            rm.metrics.ExpectedCalibrationError(num_bins=FLAGS.num_bins),
            'test/negative_log_likelihood':
            tf.keras.metrics.Mean(),
            'test/accuracy':
            tf.keras.metrics.SparseCategoricalAccuracy(),
            'test/ece':
            rm.metrics.ExpectedCalibrationError(num_bins=FLAGS.num_bins),
        }
        if validation_dataset:
            metrics.update({
                'validation/negative_log_likelihood':
                tf.keras.metrics.Mean(),
                'validation/accuracy':
                tf.keras.metrics.SparseCategoricalAccuracy(),
                'validation/ece':
                rm.metrics.ExpectedCalibrationError(num_bins=FLAGS.num_bins),
            })
        if FLAGS.corruptions_interval > 0:
            corrupt_metrics = {}
            for intensity in range(1, 6):
                for corruption in corruption_types:
                    dataset_name = '{0}_{1}'.format(corruption, intensity)
                    corrupt_metrics['test/nll_{}'.format(dataset_name)] = (
                        tf.keras.metrics.Mean())
                    corrupt_metrics['test/accuracy_{}'.format(
                        dataset_name)] = (
                            tf.keras.metrics.SparseCategoricalAccuracy())
                    corrupt_metrics['test/ece_{}'.format(dataset_name)] = (
                        rm.metrics.ExpectedCalibrationError(
                            num_bins=FLAGS.num_bins))

        checkpoint = tf.train.Checkpoint(model=model, optimizer=optimizer)
        latest_checkpoint = tf.train.latest_checkpoint(FLAGS.output_dir)
        initial_epoch = 0
        if latest_checkpoint:
            # checkpoint.restore must be within a strategy.scope() so that optimizer
            # slot variables are mirrored.
            checkpoint.restore(latest_checkpoint)
            logging.info('Loaded checkpoint %s', latest_checkpoint)
            initial_epoch = optimizer.iterations.numpy() // steps_per_epoch

    @tf.function
    def train_step(iterator):
        """Training StepFn."""
        def step_fn(inputs):
            """Per-Replica StepFn."""
            images = inputs['features']
            labels = inputs['labels']
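            # Tile the batch so each copy of an example gets an independent dropout
            # mask within a single forward pass (MC dropout at training time).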
            images = tf.tile(images,
                             [FLAGS.num_dropout_samples_training, 1, 1, 1])
            labels = tf.tile(labels, [FLAGS.num_dropout_samples_training])
            with tf.GradientTape() as tape:
                logits = model(images, training=True)
                negative_log_likelihood = tf.reduce_mean(
                    tf.keras.losses.sparse_categorical_crossentropy(
                        labels, logits, from_logits=True))
                l2_loss = sum(model.losses)
                loss = negative_log_likelihood + l2_loss
                # Scale the loss, since TPUStrategy will sum-reduce gradients across replicas.
                scaled_loss = loss / strategy.num_replicas_in_sync

            grads = tape.gradient(scaled_loss, model.trainable_variables)
            optimizer.apply_gradients(zip(grads, model.trainable_variables))

            probs = tf.nn.softmax(logits)
            metrics['train/ece'].add_batch(probs, label=labels)
            metrics['train/loss'].update_state(loss)
            metrics['train/negative_log_likelihood'].update_state(
                negative_log_likelihood)
            metrics['train/accuracy'].update_state(labels, logits)

        for _ in tf.range(tf.cast(steps_per_epoch, tf.int32)):
            strategy.run(step_fn, args=(next(iterator), ))

    @tf.function
    def test_step(iterator, dataset_split, dataset_name, num_steps):
        """Evaluation StepFn."""
        def step_fn(inputs):
            """Per-Replica StepFn."""
            images = inputs['features']
            labels = inputs['labels']

            logits_list = []
            for _ in range(FLAGS.num_dropout_samples):
                logits = model(images, training=False)
                logits_list.append(logits)

            # Logits dimension is (num_samples, batch_size, num_classes).
            logits_list = tf.stack(logits_list, axis=0)
            probs_list = tf.nn.softmax(logits_list)
            probs = tf.reduce_mean(probs_list, axis=0)

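            # Predictive NLL of the MC-dropout mixture: -log((1/S) * sum_s p(y | x, s)),
            # computed with logsumexp for numerical stability.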
            labels_broadcasted = tf.broadcast_to(
                labels, [FLAGS.num_dropout_samples, labels.shape[0]])
            log_likelihoods = -tf.keras.losses.sparse_categorical_crossentropy(
                labels_broadcasted, logits_list, from_logits=True)
            negative_log_likelihood = tf.reduce_mean(
                -tf.reduce_logsumexp(log_likelihoods, axis=[0]) +
                tf.math.log(float(FLAGS.num_dropout_samples)))

            if dataset_name == 'clean':
                metrics[
                    f'{dataset_split}/negative_log_likelihood'].update_state(
                        negative_log_likelihood)
                metrics[f'{dataset_split}/accuracy'].update_state(
                    labels, probs)
                metrics[f'{dataset_split}/ece'].add_batch(probs, label=labels)
            else:
                corrupt_metrics['test/nll_{}'.format(
                    dataset_name)].update_state(negative_log_likelihood)
                corrupt_metrics['test/accuracy_{}'.format(
                    dataset_name)].update_state(labels, probs)
                corrupt_metrics['test/ece_{}'.format(dataset_name)].add_batch(
                    probs, label=labels)

        for _ in tf.range(tf.cast(num_steps, tf.int32)):
            strategy.run(step_fn, args=(next(iterator), ))

    metrics.update({'test/ms_per_example': tf.keras.metrics.Mean()})

    train_iterator = iter(train_dataset)
    start_time = time.time()
    for epoch in range(initial_epoch, FLAGS.train_epochs):
        logging.info('Starting to run epoch: %s', epoch)
        train_step(train_iterator)

        current_step = (epoch + 1) * steps_per_epoch
        max_steps = steps_per_epoch * FLAGS.train_epochs
        time_elapsed = time.time() - start_time
        steps_per_sec = float(current_step) / time_elapsed
        eta_seconds = (max_steps - current_step) / steps_per_sec
        message = ('{:.1%} completion: epoch {:d}/{:d}. {:.1f} steps/s. '
                   'ETA: {:.0f} min. Time elapsed: {:.0f} min'.format(
                       current_step / max_steps, epoch + 1, FLAGS.train_epochs,
                       steps_per_sec, eta_seconds / 60, time_elapsed / 60))
        logging.info(message)

        if validation_dataset:
            validation_iterator = iter(validation_dataset)
            test_step(validation_iterator, 'validation', 'clean',
                      steps_per_validation)
        datasets_to_evaluate = {'clean': test_datasets['clean']}
        if (FLAGS.corruptions_interval > 0
                and (epoch + 1) % FLAGS.corruptions_interval == 0):
            datasets_to_evaluate = test_datasets
        for dataset_name, test_dataset in datasets_to_evaluate.items():
            test_iterator = iter(test_dataset)
            logging.info('Testing on dataset %s', dataset_name)
            logging.info('Starting to run eval at epoch: %s', epoch)
            test_start_time = time.time()
            test_step(test_iterator, 'test', dataset_name, steps_per_eval)
            ms_per_example = (time.time() - test_start_time) * 1e6 / batch_size
            metrics['test/ms_per_example'].update_state(ms_per_example)

            logging.info('Done with testing on %s', dataset_name)

        corrupt_results = {}
        if (FLAGS.corruptions_interval > 0
                and (epoch + 1) % FLAGS.corruptions_interval == 0):
            corrupt_results = utils.aggregate_corrupt_metrics(
                corrupt_metrics, corruption_types)

        logging.info('Train Loss: %.4f, Accuracy: %.2f%%',
                     metrics['train/loss'].result(),
                     metrics['train/accuracy'].result() * 100)
        logging.info('Test NLL: %.4f, Accuracy: %.2f%%',
                     metrics['test/negative_log_likelihood'].result(),
                     metrics['test/accuracy'].result() * 100)
        total_results = {
            name: metric.result()
            for name, metric in metrics.items()
        }
        total_results.update(corrupt_results)
        # Metrics from Robustness Metrics (like ECE) will return a dict with a
        # single key/value, instead of a scalar.
        total_results = {
            k: (list(v.values())[0] if isinstance(v, dict) else v)
            for k, v in total_results.items()
        }
        with summary_writer.as_default():
            for name, result in total_results.items():
                tf.summary.scalar(name, result, step=epoch + 1)

        for metric in metrics.values():
            metric.reset_states()

        if (FLAGS.checkpoint_interval > 0
                and (epoch + 1) % FLAGS.checkpoint_interval == 0):
            checkpoint_name = checkpoint.save(
                os.path.join(FLAGS.output_dir, 'checkpoint'))
            logging.info('Saved checkpoint to %s', checkpoint_name)
    final_checkpoint_name = checkpoint.save(
        os.path.join(FLAGS.output_dir, 'checkpoint'))
    logging.info('Saved last checkpoint to %s', final_checkpoint_name)
    with summary_writer.as_default():
        hp.hparams({
            'base_learning_rate':
            FLAGS.base_learning_rate,
            'one_minus_momentum':
            FLAGS.one_minus_momentum,
            'l2':
            FLAGS.l2,
            'dropout_rate':
            FLAGS.dropout_rate,
            'num_dropout_samples':
            FLAGS.num_dropout_samples,
            'num_dropout_samples_training':
            FLAGS.num_dropout_samples_training,
        })
Example 3
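Trains a hyper-batchensemble Wide ResNet 28-10 (wide_resnet_hyperbatchensemble) on CIFAR-10/100: per-member L2 hyperparameters (lambdas) are drawn from tunable log-uniform distributions whose bounds are optimized on a held-out validation split during training.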
def main(argv):
    del argv  # unused arg
    tf.io.gfile.makedirs(FLAGS.output_dir)
    logging.info('Saving checkpoints at %s', FLAGS.output_dir)
    tf.random.set_seed(FLAGS.seed)

    data_dir = utils.get_data_dir_from_flags(FLAGS)
    if FLAGS.use_gpu:
        logging.info('Use GPU')
        strategy = tf.distribute.MirroredStrategy()
    else:
        logging.info('Use TPU at %s',
                     FLAGS.tpu if FLAGS.tpu is not None else 'local')
        resolver = tf.distribute.cluster_resolver.TPUClusterResolver(
            tpu=FLAGS.tpu)
        tf.config.experimental_connect_to_cluster(resolver)
        tf.tpu.experimental.initialize_tpu_system(resolver)
        strategy = tf.distribute.TPUStrategy(resolver)

    per_core_batch_size = FLAGS.per_core_batch_size // FLAGS.ensemble_size
    batch_size = per_core_batch_size * FLAGS.num_cores
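    # The images are tiled back up by ensemble_size inside the step functions, so
    # the effective per-core batch stays at FLAGS.per_core_batch_size.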
    check_bool = FLAGS.train_proportion > 0 and FLAGS.train_proportion <= 1
    assert check_bool, 'The train proportion must satisfy 0 < proportion <= 1.'

    drop_remainder_validation = True
    if not FLAGS.use_gpu:
        # This has to be True for TPU training; otherwise the batch size of the
        # validation images cannot be determined at TPU compile time.
        assert drop_remainder_validation, 'drop_remainder must be True in TPU mode.'

    validation_percent = 1 - FLAGS.train_proportion
    train_dataset = ub.datasets.get(
        FLAGS.dataset,
        data_dir=data_dir,
        download_data=FLAGS.download_data,
        split=tfds.Split.TRAIN,
        validation_percent=validation_percent).load(batch_size=batch_size)
    validation_dataset = ub.datasets.get(
        FLAGS.dataset,
        data_dir=data_dir,
        download_data=FLAGS.download_data,
        split=tfds.Split.VALIDATION,
        validation_percent=validation_percent,
        drop_remainder=drop_remainder_validation).load(batch_size=batch_size)
    validation_dataset = validation_dataset.repeat()
    clean_test_dataset = ub.datasets.get(
        FLAGS.dataset,
        data_dir=data_dir,
        download_data=FLAGS.download_data,
        split=tfds.Split.TEST).load(batch_size=batch_size)
    train_dataset = strategy.experimental_distribute_dataset(train_dataset)
    validation_dataset = strategy.experimental_distribute_dataset(
        validation_dataset)
    test_datasets = {
        'clean': strategy.experimental_distribute_dataset(clean_test_dataset),
    }
    if FLAGS.corruptions_interval > 0:
        if FLAGS.dataset == 'cifar100':
            data_dir = FLAGS.cifar100_c_path
        corruption_types, _ = utils.load_corrupted_test_info(FLAGS.dataset)
        for corruption_type in corruption_types:
            for severity in range(1, 6):
                dataset = ub.datasets.get(
                    f'{FLAGS.dataset}_corrupted',
                    corruption_type=corruption_type,
                    data_dir=data_dir,
                    severity=severity,
                    split=tfds.Split.TEST).load(batch_size=batch_size)
                test_datasets[f'{corruption_type}_{severity}'] = (
                    strategy.experimental_distribute_dataset(dataset))

    ds_info = tfds.builder(FLAGS.dataset).info
    train_sample_size = ds_info.splits[
        'train'].num_examples * FLAGS.train_proportion
    steps_per_epoch = int(train_sample_size / batch_size)
    train_sample_size = int(train_sample_size)

    steps_per_eval = ds_info.splits['test'].num_examples // batch_size
    num_classes = ds_info.features['label'].num_classes

    summary_writer = tf.summary.create_file_writer(
        os.path.join(FLAGS.output_dir, 'summaries'))

    logging.info('Building Keras model.')
    depth = 28
    width = 10

    dict_ranges = {'min': FLAGS.min_l2_range, 'max': FLAGS.max_l2_range}
    ranges = [dict_ranges for _ in range(6)]  # 6 independent l2 parameters
    model_config = {
        'key_to_index': {
            'input_conv_l2_kernel': 0,
            'group_l2_kernel': 1,
            'group_1_l2_kernel': 2,
            'group_2_l2_kernel': 3,
            'dense_l2_kernel': 4,
            'dense_l2_bias': 5,
        },
        'ranges': ranges,
        'test': None
    }
    lambdas_config = LambdaConfig(model_config['ranges'],
                                  model_config['key_to_index'])

    if FLAGS.e_body_hidden_units > 0:
        e_body_arch = '({},)'.format(FLAGS.e_body_hidden_units)
    else:
        e_body_arch = '()'
    e_shared_arch = '()'
    e_activation = 'tanh'
    filters_resnet = [16]
    for i in range(0, 3):  # 3 groups of blocks
        filters_resnet.extend([16 * width * 2**i] *
                              9)  # 9 layers in each block
    # The e_head dimension for each conv2d is just its number of filters (kernel
    # only); for the final dense layer it is twice the number of classes (kernel + bias).
    e_head_dims = [x for x in filters_resnet] + [2 * num_classes]

    with strategy.scope():
        e_models = e_factory(
            lambdas_config.input_shape,
            e_head_dims=e_head_dims,
            e_body_arch=eval(e_body_arch),  # pylint: disable=eval-used
            e_shared_arch=eval(e_shared_arch),  # pylint: disable=eval-used
            activation=e_activation,
            use_bias=FLAGS.e_model_use_bias,
            e_head_init=FLAGS.init_emodels_stddev)

        model = wide_resnet_hyperbatchensemble(
            input_shape=ds_info.features['image'].shape,
            depth=depth,
            width_multiplier=width,
            num_classes=num_classes,
            ensemble_size=FLAGS.ensemble_size,
            random_sign_init=FLAGS.random_sign_init,
            config=lambdas_config,
            e_models=e_models,
            l2_batchnorm_layer=FLAGS.l2_batchnorm,
            regularize_fast_weights=FLAGS.regularize_fast_weights,
            fast_weights_eq_contraint=FLAGS.fast_weights_eq_contraint,
            version=2)

        logging.info('Model input shape: %s', model.input_shape)
        logging.info('Model output shape: %s', model.output_shape)
        logging.info('Model number of weights: %s', model.count_params())
        # build hyper-batchensemble complete -------------------------

        # Initialize Lambda distributions for tuning
        lambdas_mean = tf.reduce_mean(
            log_uniform_mean([lambdas_config.log_min, lambdas_config.log_max]))
        lambdas0 = tf.random.normal((FLAGS.ensemble_size, lambdas_config.dim),
                                    lambdas_mean,
                                    0.1 * FLAGS.ens_init_delta_bounds)
        lower0 = lambdas0 - tf.constant(FLAGS.ens_init_delta_bounds)
        lower0 = tf.maximum(lower0, 1e-8)
        upper0 = lambdas0 + tf.constant(FLAGS.ens_init_delta_bounds)

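        # The bounds are parameterized and tuned in log space so they stay positive.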
        log_lower = tf.Variable(tf.math.log(lower0))
        log_upper = tf.Variable(tf.math.log(upper0))
        lambda_parameters = [log_lower, log_upper]  # these variables are tuned
        clip_lambda_parameters(lambda_parameters, lambdas_config)

        # Optimizer settings to train model weights
        # Linearly scale the learning rate and the decay epochs from the vanilla settings.
        # Note: Here, we don't divide the epochs by 200 as for the other uncertainty
        # baselines.
        base_lr = FLAGS.base_learning_rate * batch_size / 128
        lr_decay_epochs = [int(l) for l in FLAGS.lr_decay_epochs]

        lr_schedule = ub.schedules.WarmUpPiecewiseConstantSchedule(
            steps_per_epoch,
            base_lr,
            decay_ratio=FLAGS.lr_decay_ratio,
            decay_epochs=lr_decay_epochs,
            warmup_epochs=FLAGS.lr_warmup_epochs)
        optimizer = tf.keras.optimizers.SGD(lr_schedule,
                                            momentum=1.0 -
                                            FLAGS.one_minus_momentum,
                                            nesterov=True)

        # tuner used for optimizing lambda_parameters
        tuner = tf.keras.optimizers.Adam(FLAGS.lr_tuning)

        metrics = {
            'train/negative_log_likelihood':
            tf.keras.metrics.Mean(),
            'train/accuracy':
            tf.keras.metrics.SparseCategoricalAccuracy(),
            'train/loss':
            tf.keras.metrics.Mean(),
            'train/ece':
            rm.metrics.ExpectedCalibrationError(num_bins=FLAGS.num_bins),
            'train/disagreement':
            tf.keras.metrics.Mean(),
            'train/average_kl':
            tf.keras.metrics.Mean(),
            'train/cosine_similarity':
            tf.keras.metrics.Mean(),
            'test/negative_log_likelihood':
            tf.keras.metrics.Mean(),
            'test/accuracy':
            tf.keras.metrics.SparseCategoricalAccuracy(),
            'test/ece':
            rm.metrics.ExpectedCalibrationError(num_bins=FLAGS.num_bins),
            'test/gibbs_nll':
            tf.keras.metrics.Mean(),
            'test/gibbs_accuracy':
            tf.keras.metrics.SparseCategoricalAccuracy(),
            'test/disagreement':
            tf.keras.metrics.Mean(),
            'test/average_kl':
            tf.keras.metrics.Mean(),
            'test/cosine_similarity':
            tf.keras.metrics.Mean(),
            'validation/loss':
            tf.keras.metrics.Mean(),
            'validation/loss_entropy':
            tf.keras.metrics.Mean(),
            'validation/loss_ce':
            tf.keras.metrics.Mean()
        }
        corrupt_metrics = {}

        for i in range(FLAGS.ensemble_size):
            metrics['test/nll_member_{}'.format(i)] = tf.keras.metrics.Mean()
            metrics['test/accuracy_member_{}'.format(i)] = (
                tf.keras.metrics.SparseCategoricalAccuracy())
        if FLAGS.corruptions_interval > 0:
            for intensity in range(1, 6):
                for corruption in corruption_types:
                    dataset_name = '{0}_{1}'.format(corruption, intensity)
                    corrupt_metrics['test/nll_{}'.format(dataset_name)] = (
                        tf.keras.metrics.Mean())
                    corrupt_metrics['test/accuracy_{}'.format(
                        dataset_name)] = (
                            tf.keras.metrics.SparseCategoricalAccuracy())
                    corrupt_metrics['test/ece_{}'.format(dataset_name)] = (
                        rm.metrics.ExpectedCalibrationError(
                            num_bins=FLAGS.num_bins))

        checkpoint = tf.train.Checkpoint(model=model,
                                         lambda_parameters=lambda_parameters,
                                         optimizer=optimizer)

        latest_checkpoint = tf.train.latest_checkpoint(FLAGS.output_dir)
        initial_epoch = 0
        if latest_checkpoint and FLAGS.restore_checkpoint:
            # checkpoint.restore must be within a strategy.scope() so that optimizer
            # slot variables are mirrored.
            checkpoint.restore(latest_checkpoint)
            logging.info('Loaded checkpoint %s', latest_checkpoint)
            initial_epoch = optimizer.iterations.numpy() // steps_per_epoch

    @tf.function
    def train_step(iterator):
        """Training StepFn."""
        def step_fn(inputs):
            """Per-Replica StepFn."""
            images = inputs['features']
            labels = inputs['labels']
            images = tf.tile(images, [FLAGS.ensemble_size, 1, 1, 1])

            # generate lambdas
            lambdas = log_uniform_sample(per_core_batch_size,
                                         lambda_parameters)
            lambdas = tf.reshape(lambdas,
                                 (FLAGS.ensemble_size * per_core_batch_size,
                                  lambdas_config.dim))

            with tf.GradientTape() as tape:
                logits = model([images, lambdas], training=True)

                if FLAGS.use_gibbs_ce:
                    # Average of single model CEs
                    # tiling of labels should be only done for Gibbs CE loss
                    labels = tf.tile(labels, [FLAGS.ensemble_size])
                    negative_log_likelihood = tf.reduce_mean(
                        tf.keras.losses.sparse_categorical_crossentropy(
                            labels, logits, from_logits=True))
                else:
                    # Ensemble CE uses no tiling of the labels
                    negative_log_likelihood = ensemble_crossentropy(
                        labels, logits, FLAGS.ensemble_size)
                # Note: Divide l2_loss by the sample size (this differs from the
                # uncertainty_baselines implementation).
                l2_loss = sum(model.losses) / train_sample_size
                loss = negative_log_likelihood + l2_loss
                # Scale the loss, since TPUStrategy will sum-reduce gradients across replicas.
                scaled_loss = loss / strategy.num_replicas_in_sync

            grads = tape.gradient(scaled_loss, model.trainable_variables)

            # Separate learning rate for fast weights.
            grads_and_vars = []
            for grad, var in zip(grads, model.trainable_variables):
                if (('alpha' in var.name or 'gamma' in var.name)
                        and 'batch_norm' not in var.name):
                    grads_and_vars.append(
                        (grad * FLAGS.fast_weight_lr_multiplier, var))
                else:
                    grads_and_vars.append((grad, var))
            optimizer.apply_gradients(grads_and_vars)

            probs = tf.nn.softmax(logits)
            per_probs = tf.split(probs,
                                 num_or_size_splits=FLAGS.ensemble_size,
                                 axis=0)
            per_probs_stacked = tf.stack(per_probs, axis=0)
            metrics['train/ece'].add_batch(probs, label=labels)
            metrics['train/loss'].update_state(loss)
            metrics['train/negative_log_likelihood'].update_state(
                negative_log_likelihood)
            metrics['train/accuracy'].update_state(labels, logits)
            diversity = rm.metrics.AveragePairwiseDiversity()
            diversity.add_batch(per_probs_stacked,
                                num_models=FLAGS.ensemble_size)
            diversity_results = diversity.result()
            for k, v in diversity_results.items():
                metrics['train/' + k].update_state(v)

            if grads_and_vars:
                grads, _ = zip(*grads_and_vars)

        strategy.run(step_fn, args=(next(iterator), ))

    @tf.function
    def tuning_step(iterator):
        """Tuning StepFn."""
        def step_fn(inputs):
            """Per-Replica StepFn."""
            images = inputs['features']
            labels = inputs['labels']
            images = tf.tile(images, [FLAGS.ensemble_size, 1, 1, 1])

            with tf.GradientTape(watch_accessed_variables=False) as tape:
                tape.watch(lambda_parameters)

                # sample lambdas
                if FLAGS.sample_and_tune:
                    lambdas = log_uniform_sample(per_core_batch_size,
                                                 lambda_parameters)
                else:
                    lambdas = log_uniform_mean(lambda_parameters)
                    lambdas = tf.repeat(lambdas, per_core_batch_size, axis=0)
                lambdas = tf.reshape(lambdas,
                                     (FLAGS.ensemble_size *
                                      per_core_batch_size, lambdas_config.dim))
                # ensemble CE
                logits = model([images, lambdas], training=False)
                ce = ensemble_crossentropy(labels, logits, FLAGS.ensemble_size)
                # entropy penalty for lambda distribution
                entropy = FLAGS.tau * log_uniform_entropy(lambda_parameters)
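                # Subtracting the entropy term encourages broad lambda distributions
                # and keeps the tuned bounds from collapsing.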
                loss = ce - entropy
                scaled_loss = loss / strategy.num_replicas_in_sync

            gradients = tape.gradient(loss, lambda_parameters)
            tuner.apply_gradients(zip(gradients, lambda_parameters))

            metrics['validation/loss_ce'].update_state(
                ce / strategy.num_replicas_in_sync)
            metrics['validation/loss_entropy'].update_state(
                entropy / strategy.num_replicas_in_sync)
            metrics['validation/loss'].update_state(scaled_loss)

        strategy.run(step_fn, args=(next(iterator), ))

    @tf.function
    def test_step(iterator, dataset_name, num_eval_samples=0):
        """Evaluation StepFn."""

        n_samples = num_eval_samples if num_eval_samples >= 0 else -num_eval_samples
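        # A negative num_eval_samples means: evaluate with sampled lambdas only,
        # without also including the mean of the lambda distribution.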
        if num_eval_samples >= 0:
            # the +1 accounts for the fact that we add the mean of lambdas
            ensemble_size = FLAGS.ensemble_size * (1 + n_samples)
        else:
            ensemble_size = FLAGS.ensemble_size * n_samples

        def step_fn(inputs):
            """Per-Replica StepFn."""
            # Note that we don't use tf.tile for labels here
            images = inputs['features']
            labels = inputs['labels']
            images = tf.tile(images, [ensemble_size, 1, 1, 1])

            # get lambdas
            samples = log_uniform_sample(n_samples, lambda_parameters)
            if num_eval_samples >= 0:
                lambdas = log_uniform_mean(lambda_parameters)
                lambdas = tf.expand_dims(lambdas, 1)
                lambdas = tf.concat((lambdas, samples), 1)
            else:
                lambdas = samples

            # lambdas with shape (ens size, samples, dim of lambdas)
            rep_lambdas = tf.repeat(lambdas, per_core_batch_size, axis=1)
            rep_lambdas = tf.reshape(rep_lambdas,
                                     (ensemble_size * per_core_batch_size, -1))

            # eval on testsets
            logits = model([images, rep_lambdas], training=False)
            probs = tf.nn.softmax(logits)
            per_probs = tf.split(probs,
                                 num_or_size_splits=ensemble_size,
                                 axis=0)

            # Per-member performance and Gibbs performance (the average per-member performance).
            if dataset_name == 'clean':
                for i in range(FLAGS.ensemble_size):
                    # we record the first sample of lambdas per batch-ens member
                    first_member_index = i * (ensemble_size //
                                              FLAGS.ensemble_size)
                    member_probs = per_probs[first_member_index]
                    member_loss = tf.keras.losses.sparse_categorical_crossentropy(
                        labels, member_probs)
                    metrics['test/nll_member_{}'.format(i)].update_state(
                        member_loss)
                    metrics['test/accuracy_member_{}'.format(i)].update_state(
                        labels, member_probs)

                labels_tile = tf.tile(labels, [ensemble_size])
                metrics['test/gibbs_nll'].update_state(
                    tf.reduce_mean(
                        tf.keras.losses.sparse_categorical_crossentropy(
                            labels_tile, logits, from_logits=True)))
                metrics['test/gibbs_accuracy'].update_state(labels_tile, probs)

            # ensemble performance
            negative_log_likelihood = ensemble_crossentropy(
                labels, logits, ensemble_size)
            probs = tf.reduce_mean(per_probs, axis=0)
            if dataset_name == 'clean':
                metrics['test/negative_log_likelihood'].update_state(
                    negative_log_likelihood)
                metrics['test/accuracy'].update_state(labels, probs)
                metrics['test/ece'].add_batch(probs, label=labels)
            else:
                corrupt_metrics['test/nll_{}'.format(
                    dataset_name)].update_state(negative_log_likelihood)
                corrupt_metrics['test/accuracy_{}'.format(
                    dataset_name)].update_state(labels, probs)
                corrupt_metrics['test/ece_{}'.format(dataset_name)].add_batch(
                    probs, label=labels)

            if dataset_name == 'clean':
                per_probs_stacked = tf.stack(per_probs, axis=0)
                diversity = rm.metrics.AveragePairwiseDiversity()
                diversity.add_batch(per_probs_stacked,
                                    num_models=ensemble_size)
                diversity_results = diversity.result()
                for k, v in diversity_results.items():
                    metrics['test/' + k].update_state(v)

        strategy.run(step_fn, args=(next(iterator), ))

    logging.info('--- Starting training using %d examples. ---',
                 train_sample_size)
    train_iterator = iter(train_dataset)
    validation_iterator = iter(validation_dataset)
    start_time = time.time()
    for epoch in range(initial_epoch, FLAGS.train_epochs):
        logging.info('Starting to run epoch: %s', epoch)
        for step in range(steps_per_epoch):
            train_step(train_iterator)
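            # After tuning_warmup_epochs, interleave lambda-tuning steps on the
            # validation set every tuning_every_x_step training steps.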
            do_tuning = (epoch >= FLAGS.tuning_warmup_epochs)
            if do_tuning and ((step + 1) % FLAGS.tuning_every_x_step == 0):
                tuning_step(validation_iterator)
                # clip lambda parameters if outside of range
                clip_lambda_parameters(lambda_parameters, lambdas_config)

            current_step = epoch * steps_per_epoch + (step + 1)
            max_steps = steps_per_epoch * FLAGS.train_epochs
            time_elapsed = time.time() - start_time
            steps_per_sec = float(current_step) / time_elapsed
            eta_seconds = (max_steps - current_step) / steps_per_sec
            message = ('{:.1%} completion: epoch {:d}/{:d}. {:.1f} steps/s. '
                       'ETA: {:.0f} min. Time elapsed: {:.0f} min'.format(
                           current_step / max_steps, epoch + 1,
                           FLAGS.train_epochs, steps_per_sec, eta_seconds / 60,
                           time_elapsed / 60))
            if step % 20 == 0:
                logging.info(message)

        # evaluate on test data
        datasets_to_evaluate = {'clean': test_datasets['clean']}
        if (FLAGS.corruptions_interval > 0
                and (epoch + 1) % FLAGS.corruptions_interval == 0):
            datasets_to_evaluate = test_datasets
        for dataset_name, test_dataset in datasets_to_evaluate.items():
            test_iterator = iter(test_dataset)
            logging.info('Testing on dataset %s', dataset_name)
            for step in range(steps_per_eval):
                if step % 20 == 0:
                    logging.info('Starting to run eval step %s of epoch: %s',
                                 step, epoch)
                test_step(test_iterator, dataset_name, FLAGS.num_eval_samples)
            logging.info('Done with testing on %s', dataset_name)

        corrupt_results = {}
        if (FLAGS.corruptions_interval > 0
                and (epoch + 1) % FLAGS.corruptions_interval == 0):
            corrupt_results = utils.aggregate_corrupt_metrics(
                corrupt_metrics, corruption_types)
        logging.info('Train Loss: %.4f, Accuracy: %.2f%%',
                     metrics['train/loss'].result(),
                     metrics['train/accuracy'].result() * 100)
        logging.info('Validation Loss: %.4f, CE: %.4f, Entropy: %.4f',
                     metrics['validation/loss'].result(),
                     metrics['validation/loss_ce'].result(),
                     metrics['validation/loss_entropy'].result())
        logging.info('Test NLL: %.4f, Accuracy: %.2f%%',
                     metrics['test/negative_log_likelihood'].result(),
                     metrics['test/accuracy'].result() * 100)
        for i in range(FLAGS.ensemble_size):
            logging.info(
                'Member %d Test Loss: %.4f, Accuracy: %.2f%%', i,
                metrics['test/nll_member_{}'.format(i)].result(),
                metrics['test/accuracy_member_{}'.format(i)].result() * 100)

        total_results = {
            name: metric.result()
            for name, metric in metrics.items()
        }
        total_results.update({
            name: metric.result()
            for name, metric in corrupt_metrics.items()
        })
        total_results.update(corrupt_results)
        # Metrics from Robustness Metrics (like ECE) will return a dict with a
        # single key/value, instead of a scalar.
        total_results = {
            k: (list(v.values())[0] if isinstance(v, dict) else v)
            for k, v in total_results.items()
        }
        with summary_writer.as_default():
            for name, result in total_results.items():
                tf.summary.scalar(name, result, step=epoch + 1)

        for metric in metrics.values():
            metric.reset_states()

        # save checkpoint and lambdas config
        if (FLAGS.checkpoint_interval > 0
                and (epoch + 1) % FLAGS.checkpoint_interval == 0):
            checkpoint_name = checkpoint.save(
                os.path.join(FLAGS.output_dir, 'checkpoint'))
            lambdas_cf = lambdas_config.get_config()
            filepath = os.path.join(FLAGS.output_dir, 'lambdas_config.p')
            with tf.io.gfile.GFile(filepath, 'wb') as fp:
                pickle.dump(lambdas_cf, fp, protocol=pickle.HIGHEST_PROTOCOL)
            logging.info('Saved checkpoint to %s', checkpoint_name)
    with summary_writer.as_default():
        hp.hparams({
            'base_learning_rate':
            FLAGS.base_learning_rate,
            'one_minus_momentum':
            FLAGS.one_minus_momentum,
            'l2':
            FLAGS.l2,
            'random_sign_init':
            FLAGS.random_sign_init,
            'fast_weight_lr_multiplier':
            FLAGS.fast_weight_lr_multiplier,
        })
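
The loops above unwrap results from Robustness Metrics before writing TensorBoard summaries, because metrics such as ExpectedCalibrationError return a one-entry dict from result() rather than a scalar. A minimal standalone sketch of that pattern, using the same calls as the scripts (the probabilities and labels below are illustrative values only):

import numpy as np
import robustness_metrics as rm

ece = rm.metrics.ExpectedCalibrationError(num_bins=15)
probs = np.array([[0.9, 0.1], [0.2, 0.8]], dtype=np.float32)  # illustrative predictions
labels = np.array([0, 1])                                     # illustrative labels
ece.add_batch(probs, label=labels)
result = ece.result()                  # a one-entry dict, e.g. {'ece': <value>}
scalar = list(result.values())[0]      # the unwrapping applied before tf.summary.scalar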
Example No. 4
0
def main(argv):
    del argv  # unused arg
    if not FLAGS.use_gpu:
        raise ValueError('Only GPU is currently supported.')
    if FLAGS.num_cores > 1:
        raise ValueError('Only a single accelerator is currently supported.')
    tf.random.set_seed(FLAGS.seed)
    tf.io.gfile.makedirs(FLAGS.output_dir)

    ds_info = tfds.builder(FLAGS.dataset).info
    batch_size = FLAGS.per_core_batch_size * FLAGS.num_cores
    steps_per_eval = ds_info.splits['test'].num_examples // batch_size
    num_classes = ds_info.features['label'].num_classes

    data_dir = utils.get_data_dir_from_flags(FLAGS)
    dataset = ub.datasets.get(
        FLAGS.dataset,
        data_dir=data_dir,
        download_data=FLAGS.download_data,
        split=tfds.Split.TEST).load(batch_size=batch_size)
    validation_percent = 1. - FLAGS.train_proportion
    val_dataset = ub.datasets.get(
        dataset_name=FLAGS.dataset,
        data_dir=data_dir,
        download_data=FLAGS.download_data,
        split=tfds.Split.VALIDATION,
        validation_percent=validation_percent,
        drop_remainder=False).load(batch_size=batch_size)
    steps_per_val_eval = int(ds_info.splits['train'].num_examples *
                             validation_percent) // batch_size

    test_datasets = {'clean': dataset}
    if FLAGS.dataset == 'cifar100':
        data_dir = FLAGS.cifar100_c_path
    corruption_types, _ = utils.load_corrupted_test_info(FLAGS.dataset)
    for corruption_type in corruption_types:
        for severity in range(1, 6):
            dataset = ub.datasets.get(
                f'{FLAGS.dataset}_corrupted',
                corruption_type=corruption_type,
                data_dir=data_dir,
                severity=severity,
                split=tfds.Split.TEST).load(batch_size=batch_size)
            test_datasets[f'{corruption_type}_{severity}'] = dataset

    model = ub.models.wide_resnet(input_shape=ds_info.features['image'].shape,
                                  depth=28,
                                  width_multiplier=10,
                                  num_classes=num_classes,
                                  l2=0.)
    logging.info('Model input shape: %s', model.input_shape)
    logging.info('Model output shape: %s', model.output_shape)
    logging.info('Model number of weights: %s', model.count_params())

    # Search for checkpoints
    ensemble_filenames = parse_checkpoint_dir(FLAGS.checkpoint_dir)

    model_pool_size = len(ensemble_filenames)
    logging.info('Model pool size: %s', model_pool_size)
    logging.info('Ensemble size: %s', FLAGS.ensemble_size)
    logging.info('Ensemble number of weights: %s',
                 FLAGS.ensemble_size * model.count_params())
    logging.info('Ensemble filenames: %s', str(ensemble_filenames))
    checkpoint = tf.train.Checkpoint(model=model)

    # Compute the logits on the validation set
    val_logits, val_labels = [], []
    for m, ensemble_filename in enumerate(ensemble_filenames):
        # Enforce memory clean-up
        tf.keras.backend.clear_session()
        checkpoint.restore(ensemble_filename)
        val_iterator = iter(val_dataset)
        val_logits_m = []
        for _ in range(steps_per_val_eval):
            inputs = next(val_iterator)
            features = inputs['features']
            labels = inputs['labels']
            val_logits_m.append(model(features, training=False))
            if m == 0:
                val_labels.append(labels)

        val_logits.append(tf.concat(val_logits_m, axis=0))
        if m == 0:
            val_labels = tf.concat(val_labels, axis=0)

        percent = (m + 1.) / model_pool_size
        message = ('{:.1%} completion for prediction on validation set: '
                   'model {:d}/{:d}.'.format(percent, m + 1, model_pool_size))
        logging.info(message)

    selected_members, val_acc, val_nll = greedy_selection(
        val_logits, val_labels, FLAGS.ensemble_size, FLAGS.greedy_objective)
    unique_selected_members = list(set(selected_members))
    message = ('Members selected by greedy procedure: {} (with {} unique '
               'member(s))\n\t{}').format(
                   selected_members, len(unique_selected_members),
                   [ensemble_filenames[i] for i in selected_members])
    logging.info(message)
    val_metrics = {
        'val/accuracy': tf.keras.metrics.Mean(),
        'val/negative_log_likelihood': tf.keras.metrics.Mean()
    }
    val_metrics['val/accuracy'].update_state(val_acc)
    val_metrics['val/negative_log_likelihood'].update_state(val_nll)

    # Write model predictions to files.
    num_datasets = len(test_datasets)
    for m, member_id in enumerate(unique_selected_members):
        ensemble_filename = ensemble_filenames[member_id]
        checkpoint.restore(ensemble_filename)
        for n, (name, test_dataset) in enumerate(test_datasets.items()):
            filename = '{dataset}_{member}.npy'.format(dataset=name,
                                                       member=member_id)
            filename = os.path.join(FLAGS.output_dir, filename)
            if not tf.io.gfile.exists(filename):
                logits = []
                test_iterator = iter(test_dataset)
                for _ in range(steps_per_eval):
                    features = next(test_iterator)['features']  # pytype: disable=unsupported-operands
                    logits.append(model(features, training=False))

                logits = tf.concat(logits, axis=0)
                with tf.io.gfile.GFile(filename, 'w') as f:
                    np.save(f, logits.numpy())

            numerator = m * num_datasets + (n + 1)
            denominator = len(unique_selected_members) * num_datasets
            percent = numerator / denominator
            message = (
                '{:.1%} completion for prediction: ensemble member {:d}/{:d}. '
                'Dataset {:d}/{:d}'.format(percent, m + 1,
                                           len(unique_selected_members), n + 1,
                                           num_datasets))
            logging.info(message)

    metrics = {
        'test/negative_log_likelihood': tf.keras.metrics.Mean(),
        'test/gibbs_cross_entropy': tf.keras.metrics.Mean(),
        'test/accuracy': tf.keras.metrics.SparseCategoricalAccuracy(),
        'test/ece':
        rm.metrics.ExpectedCalibrationError(num_bins=FLAGS.num_bins),
    }
    metrics.update(val_metrics)
    corrupt_metrics = {}
    for name in test_datasets:
        corrupt_metrics['test/nll_{}'.format(name)] = tf.keras.metrics.Mean()
        corrupt_metrics['test/accuracy_{}'.format(name)] = (
            tf.keras.metrics.SparseCategoricalAccuracy())
        corrupt_metrics['test/ece_{}'.format(name)] = (
            rm.metrics.ExpectedCalibrationError(num_bins=FLAGS.num_bins))
    # Key per-member metrics by the first slot each unique member occupies in
    # selected_members, so duplicated members do not leave gaps in the keys
    # updated during evaluation below.
    for member_id in unique_selected_members:
        i = selected_members.index(member_id)
        metrics['test/nll_member_{}'.format(i)] = tf.keras.metrics.Mean()
        metrics['test/accuracy_member_{}'.format(i)] = (
            tf.keras.metrics.SparseCategoricalAccuracy())
    test_diversity = {
        'test/disagreement': tf.keras.metrics.Mean(),
        'test/average_kl': tf.keras.metrics.Mean(),
        'test/cosine_similarity': tf.keras.metrics.Mean(),
    }
    metrics.update(test_diversity)

    # Evaluate model predictions.
    for n, (name, test_dataset) in enumerate(test_datasets.items()):
        logits_dataset = []
        for member_id in selected_members:
            filename = '{dataset}_{member}.npy'.format(dataset=name,
                                                       member=member_id)
            filename = os.path.join(FLAGS.output_dir, filename)
            with tf.io.gfile.GFile(filename, 'rb') as f:
                logits_dataset.append(np.load(f))

        logits_dataset = tf.convert_to_tensor(logits_dataset)
        test_iterator = iter(test_dataset)
        for step in range(steps_per_eval):
            labels = next(test_iterator)['labels']  # pytype: disable=unsupported-operands
            logits = logits_dataset[:, (step * batch_size):((step + 1) *
                                                            batch_size)]
            labels = tf.cast(labels, tf.int32)
            negative_log_likelihood_metric = rm.metrics.EnsembleCrossEntropy()
            negative_log_likelihood_metric.add_batch(logits, labels=labels)
            negative_log_likelihood = list(
                negative_log_likelihood_metric.result().values())[0]
            per_probs = tf.nn.softmax(logits)
            probs = tf.reduce_mean(per_probs, axis=0)
            if name == 'clean':
                gibbs_ce_metric = rm.metrics.GibbsCrossEntropy()
                gibbs_ce_metric.add_batch(logits, labels=labels)
                gibbs_ce = list(gibbs_ce_metric.result().values())[0]
                metrics['test/negative_log_likelihood'].update_state(
                    negative_log_likelihood)
                metrics['test/gibbs_cross_entropy'].update_state(gibbs_ce)
                metrics['test/accuracy'].update_state(labels, probs)
                metrics['test/ece'].add_batch(probs, label=labels)

                # Care is needed with duplicated members, e.g.:
                #     selected_members = [2, 7, 3, 3]
                #     unique_selected_members = [2, 3, 7]
                #     selected_members.index(3) --> 2
                for member_id in unique_selected_members:
                    i = selected_members.index(member_id)
                    member_probs = per_probs[i]
                    member_loss = tf.keras.losses.sparse_categorical_crossentropy(
                        labels, member_probs)
                    metrics['test/nll_member_{}'.format(i)].update_state(
                        member_loss)
                    metrics['test/accuracy_member_{}'.format(i)].update_state(
                        labels, member_probs)
                diversity = rm.metrics.AveragePairwiseDiversity()
                diversity.add_batch(per_probs, num_models=len(per_probs))
                diversity_results = diversity.result()
                for k, v in diversity_results.items():
                    test_diversity['test/' + k].update_state(v)
            else:
                corrupt_metrics['test/nll_{}'.format(name)].update_state(
                    negative_log_likelihood)
                corrupt_metrics['test/accuracy_{}'.format(name)].update_state(
                    labels, probs)
                corrupt_metrics['test/ece_{}'.format(name)].add_batch(
                    probs, label=labels)

        message = (
            '{:.1%} completion for evaluation: dataset {:d}/{:d}'.format(
                (n + 1) / num_datasets, n + 1, num_datasets))
        logging.info(message)

    corrupt_results = utils.aggregate_corrupt_metrics(corrupt_metrics,
                                                      corruption_types)
    total_results = {name: metric.result() for name, metric in metrics.items()}
    total_results.update(corrupt_results)
    # Metrics from Robustness Metrics (like ECE) will return a dict with a
    # single key/value, instead of a scalar.
    total_results = {
        k: (list(v.values())[0] if isinstance(v, dict) else v)
        for k, v in total_results.items()
    }

    logging.info('Metrics: %s', total_results)
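
Because the greedy procedure above may select the same checkpoint more than once, the per-member metrics are keyed by the first slot each member occupies in selected_members. A small standalone sketch of that bookkeeping with toy indices (no dependence on the script's helpers):

selected_members = [2, 7, 3, 3]                      # greedy selection with a repeated member
unique_selected_members = list(set(selected_members))
for member_id in unique_selected_members:
    first_slot = selected_members.index(member_id)   # e.g. member 3 -> slot 2
    print('member', member_id, 'recorded under index', first_slot)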
Example No. 5
0
def main(argv):
    del argv  # unused arg
    tf.io.gfile.makedirs(FLAGS.output_dir)
    logging.info('Saving checkpoints at %s', FLAGS.output_dir)
    tf.random.set_seed(FLAGS.seed)

    data_dir = utils.get_data_dir_from_flags(FLAGS)
    if FLAGS.use_gpu:
        logging.info('Use GPU')
        strategy = tf.distribute.MirroredStrategy()
    else:
        logging.info('Use TPU at %s',
                     FLAGS.tpu if FLAGS.tpu is not None else 'local')
        resolver = tf.distribute.cluster_resolver.TPUClusterResolver(
            tpu=FLAGS.tpu)
        tf.config.experimental_connect_to_cluster(resolver)
        tf.tpu.experimental.initialize_tpu_system(resolver)
        strategy = tf.distribute.TPUStrategy(resolver)

    batch_size = FLAGS.per_core_batch_size * FLAGS.num_cores
    test_batch_size = FLAGS.per_core_batch_size * FLAGS.num_cores

    if FLAGS.dataset == 'cifar10':
        dataset_builder_class = ub.datasets.Cifar10Dataset
    else:
        dataset_builder_class = ub.datasets.Cifar100Dataset
    train_builder = dataset_builder_class(data_dir=data_dir,
                                          download_data=FLAGS.download_data,
                                          split=tfds.Split.TRAIN,
                                          use_bfloat16=FLAGS.use_bfloat16,
                                          validation_percent=1. -
                                          FLAGS.train_proportion)
    train_dataset = train_builder.load(batch_size=batch_size)
    train_dataset = strategy.experimental_distribute_dataset(train_dataset)

    validation_dataset = None
    steps_per_validation = 0
    if FLAGS.train_proportion < 1.0:
        validation_builder = dataset_builder_class(
            data_dir=data_dir,
            split=tfds.Split.VALIDATION,
            use_bfloat16=FLAGS.use_bfloat16,
            validation_percent=1. - FLAGS.train_proportion)
        validation_dataset = validation_builder.load(batch_size=batch_size)
        validation_dataset = strategy.experimental_distribute_dataset(
            validation_dataset)
        steps_per_validation = validation_builder.num_examples // batch_size

    clean_test_dataset_builder = dataset_builder_class(
        data_dir=data_dir,
        split=tfds.Split.TEST,
        use_bfloat16=FLAGS.use_bfloat16)
    clean_test_dataset = clean_test_dataset_builder.load(
        batch_size=test_batch_size)
    test_datasets = {
        'clean': strategy.experimental_distribute_dataset(clean_test_dataset),
    }
    steps_per_epoch = train_builder.num_examples // batch_size
    steps_per_eval = clean_test_dataset_builder.num_examples // batch_size
    num_classes = 100 if FLAGS.dataset == 'cifar100' else 10
    if FLAGS.corruptions_interval > 0:
        if FLAGS.dataset == 'cifar10':
            load_c_dataset = utils.load_cifar10_c
        else:
            load_c_dataset = functools.partial(utils.load_cifar100_c,
                                               path=FLAGS.cifar100_c_path)
        corruption_types, max_intensity = utils.load_corrupted_test_info(
            FLAGS.dataset)
        for corruption in corruption_types:
            for intensity in range(1, max_intensity + 1):
                dataset = load_c_dataset(corruption_name=corruption,
                                         corruption_intensity=intensity,
                                         batch_size=test_batch_size,
                                         use_bfloat16=FLAGS.use_bfloat16)
                test_datasets['{0}_{1}'.format(corruption, intensity)] = (
                    strategy.experimental_distribute_dataset(dataset))

    if FLAGS.use_bfloat16:
        policy = tf.keras.mixed_precision.experimental.Policy('mixed_bfloat16')
        tf.keras.mixed_precision.experimental.set_policy(policy)

    summary_writer = tf.summary.create_file_writer(
        os.path.join(FLAGS.output_dir, 'summaries'))

    with strategy.scope():
        logging.info('Building ResNet model')
        model = ub.models.wide_resnet_condconv(
            input_shape=(32, 32, 3),
            depth=28,
            width_multiplier=FLAGS.resnet_width_multiplier,
            num_classes=num_classes,
            num_experts=FLAGS.num_experts,
            per_core_batch_size=FLAGS.per_core_batch_size,
            use_cond_dense=FLAGS.use_cond_dense,
            reduce_dense_outputs=FLAGS.reduce_dense_outputs,
            cond_placement=FLAGS.cond_placement,
            routing_fn=FLAGS.routing_fn,
            normalize_routing=FLAGS.normalize_routing,
            normalize_dense_routing=FLAGS.normalize_dense_routing,
            top_k=FLAGS.top_k,
            routing_pooling=FLAGS.routing_pooling,
            l2=FLAGS.l2)
        # reuse_routing=FLAGS.reuse_routing,
        # shared_routing_type=FLAGS.shared_routing_type)
        logging.info('Model input shape: %s', model.input_shape)
        logging.info('Model output shape: %s', model.output_shape)
        logging.info('Model number of weights: %s', model.count_params())
        # Linearly scale the learning rate and the decay epochs relative to the
        # vanilla setup (batch size 128, 200 epochs).
        base_lr = FLAGS.base_learning_rate * batch_size / 128
        lr_decay_epochs = [(int(start_epoch_str) * FLAGS.train_epochs) // 200
                           for start_epoch_str in FLAGS.lr_decay_epochs]
        lr_schedule = ub.schedules.WarmUpPiecewiseConstantSchedule(
            steps_per_epoch,
            base_lr,
            decay_ratio=FLAGS.lr_decay_ratio,
            decay_epochs=lr_decay_epochs,
            warmup_epochs=FLAGS.lr_warmup_epochs)
        optimizer = tf.keras.optimizers.SGD(lr_schedule,
                                            momentum=1.0 -
                                            FLAGS.one_minus_momentum,
                                            nesterov=True)
        metrics = {
            'train/negative_log_likelihood':
            tf.keras.metrics.Mean(),
            'train/accuracy':
            tf.keras.metrics.SparseCategoricalAccuracy(),
            'train/loss':
            tf.keras.metrics.Mean(),
            'train/ece':
            rm.metrics.ExpectedCalibrationError(num_bins=FLAGS.num_bins),
            'test/negative_log_likelihood':
            tf.keras.metrics.Mean(),
            'test/accuracy':
            tf.keras.metrics.SparseCategoricalAccuracy(),
            'test/ece':
            rm.metrics.ExpectedCalibrationError(num_bins=FLAGS.num_bins),
        }
        eval_dataset_splits = ['test']
        if validation_dataset:
            metrics.update({
                'validation/negative_log_likelihood':
                tf.keras.metrics.Mean(),
                'validation/accuracy':
                tf.keras.metrics.SparseCategoricalAccuracy(),
                'validation/ece':
                rm.metrics.ExpectedCalibrationError(num_bins=FLAGS.num_bins),
            })
            eval_dataset_splits += ['validation']
        if not FLAGS.reduce_dense_outputs and FLAGS.use_cond_dense:
            for dataset_split in eval_dataset_splits:
                metrics.update({
                    f'{dataset_split}/nll_poe':
                    tf.keras.metrics.Mean(),
                    f'{dataset_split}/nll_moe':
                    tf.keras.metrics.Mean(),
                    f'{dataset_split}/nll_unweighted_poe':
                    tf.keras.metrics.Mean(),
                    f'{dataset_split}/nll_unweighted_moe':
                    tf.keras.metrics.Mean(),
                    f'{dataset_split}/unweighted_gibbs_ce':
                    tf.keras.metrics.Mean(),
                    f'{dataset_split}/ece_unweighted_moe':
                    rm.metrics.ExpectedCalibrationError(
                        num_bins=FLAGS.num_bins),
                    f'{dataset_split}/accuracy_unweighted_moe':
                    tf.keras.metrics.SparseCategoricalAccuracy(),
                    f'{dataset_split}/ece_poe':
                    rm.metrics.ExpectedCalibrationError(
                        num_bins=FLAGS.num_bins),
                    f'{dataset_split}/accuracy_poe':
                    tf.keras.metrics.SparseCategoricalAccuracy(),
                    f'{dataset_split}/ece_unweighted_poe':
                    rm.metrics.ExpectedCalibrationError(
                        num_bins=FLAGS.num_bins),
                    f'{dataset_split}/accuracy_unweighted_poe':
                    tf.keras.metrics.SparseCategoricalAccuracy(),
                })
                for idx in range(FLAGS.num_experts):
                    metrics[f'{dataset_split}/dense_routing_weight_{idx}'] = (
                        tf.keras.metrics.Mean())
                    metrics[
                        f'{dataset_split}/dense_routing_weight_normalized_{idx}'] = (
                            tf.keras.metrics.Mean())

        if FLAGS.corruptions_interval > 0:
            corrupt_metrics = {}
            for intensity in range(1, max_intensity + 1):
                for corruption in corruption_types:
                    dataset_name = '{0}_{1}'.format(corruption, intensity)
                    corrupt_metrics['test/nll_{}'.format(dataset_name)] = (
                        tf.keras.metrics.Mean())
                    corrupt_metrics['test/accuracy_{}'.format(
                        dataset_name)] = (
                            tf.keras.metrics.SparseCategoricalAccuracy())
                    corrupt_metrics['test/ece_{}'.format(dataset_name)] = (
                        rm.metrics.ExpectedCalibrationError(
                            num_bins=FLAGS.num_bins))
                    corrupt_metrics['test/nll_weighted_moe_{}'.format(
                        dataset_name)] = (tf.keras.metrics.Mean())
                    corrupt_metrics['test/accuracy_weighted_moe_{}'.format(
                        dataset_name)] = (
                            tf.keras.metrics.SparseCategoricalAccuracy())
                    corrupt_metrics['test/ece_weighted_moe_{}'.format(
                        dataset_name)] = (rm.metrics.ExpectedCalibrationError(
                            num_bins=FLAGS.num_bins))

        checkpoint = tf.train.Checkpoint(model=model, optimizer=optimizer)
        latest_checkpoint = tf.train.latest_checkpoint(FLAGS.output_dir)
        initial_epoch = 0
        if latest_checkpoint:
            # checkpoint.restore must be within a strategy.scope() so that optimizer
            # slot variables are mirrored.
            checkpoint.restore(latest_checkpoint)
            logging.info('Loaded checkpoint %s', latest_checkpoint)
            initial_epoch = optimizer.iterations.numpy() // steps_per_epoch

    def _process_3d_logits(logits, routing_weights, labels):
        routing_weights_3d = tf.expand_dims(routing_weights, axis=-1)
        weighted_logits = tf.math.reduce_mean(routing_weights_3d * logits,
                                              axis=1)
        unweighted_logits = tf.math.reduce_mean(logits, axis=1)

        probs = tf.nn.softmax(logits)
        unweighted_probs = tf.math.reduce_mean(probs, axis=1)
        weighted_probs = tf.math.reduce_sum(routing_weights_3d * probs, axis=1)

        labels_broadcasted = tf.tile(tf.reshape(labels, (-1, 1)),
                                     (1, FLAGS.num_experts))
        neg_log_likelihoods = tf.keras.losses.sparse_categorical_crossentropy(
            labels_broadcasted, logits, from_logits=True)
        unweighted_gibbs_ce = tf.math.reduce_mean(neg_log_likelihoods)
        weighted_gibbs_ce = tf.math.reduce_mean(
            tf.math.reduce_sum(routing_weights * neg_log_likelihoods, axis=1))
        return {
            'weighted_logits': weighted_logits,
            'unweighted_logits': unweighted_logits,
            'unweighted_probs': unweighted_probs,
            'weighted_probs': weighted_probs,
            'neg_log_likelihoods': neg_log_likelihoods,
            'unweighted_gibbs_ce': unweighted_gibbs_ce,
            'weighted_gibbs_ce': weighted_gibbs_ce
        }

    def _process_3d_logits_train(logits, routing_weights, labels):
        processing_results = _process_3d_logits(logits, routing_weights,
                                                labels)
        if FLAGS.loss == 'gibbs_ce':
            probs = processing_results['weighted_probs']
            negative_log_likelihood = processing_results['weighted_gibbs_ce']
        elif FLAGS.loss == 'unweighted_gibbs_ce':
            probs = processing_results['unweighted_probs']
            negative_log_likelihood = processing_results['unweighted_gibbs_ce']
        elif FLAGS.loss == 'moe':
            probs = processing_results['weighted_probs']
            negative_log_likelihood = tf.math.reduce_mean(
                tf.keras.losses.sparse_categorical_crossentropy(
                    labels, probs, from_logits=False))
        elif FLAGS.loss == 'unweighted_moe':
            probs = processing_results['unweighted_probs']
            negative_log_likelihood = tf.math.reduce_mean(
                tf.keras.losses.sparse_categorical_crossentropy(
                    labels, probs, from_logits=False))
        elif FLAGS.loss == 'poe':
            probs = tf.nn.softmax(processing_results['weighted_logits'])
            negative_log_likelihood = tf.math.reduce_mean(
                tf.keras.losses.sparse_categorical_crossentropy(
                    labels,
                    processing_results['weighted_logits'],
                    from_logits=True))
        elif FLAGS.loss == 'unweighted_poe':
            probs = tf.nn.softmax(processing_results['unweighted_logits'])
            negative_log_likelihood = tf.math.reduce_mean(
                tf.keras.losses.sparse_categorical_crossentropy(
                    labels,
                    processing_results['unweighted_logits'],
                    from_logits=True))
        return probs, negative_log_likelihood

    def _process_3d_logits_test(routing_weights, logits, labels):
        processing_results = _process_3d_logits(logits, routing_weights,
                                                labels)
        nll_poe = tf.math.reduce_mean(
            tf.keras.losses.sparse_categorical_crossentropy(
                labels,
                processing_results['weighted_logits'],
                from_logits=True))
        nll_unweighted_poe = tf.math.reduce_mean(
            tf.keras.losses.sparse_categorical_crossentropy(
                labels,
                processing_results['unweighted_logits'],
                from_logits=True))
        nll_moe = tf.math.reduce_mean(
            tf.keras.losses.sparse_categorical_crossentropy(
                labels,
                processing_results['weighted_probs'],
                from_logits=False))
        nll_unweighted_moe = tf.math.reduce_mean(
            tf.keras.losses.sparse_categorical_crossentropy(
                labels,
                processing_results['unweighted_probs'],
                from_logits=False))
        return {
            'nll_poe': nll_poe,
            'nll_moe': nll_moe,
            'nll_unweighted_poe': nll_unweighted_poe,
            'nll_unweighted_moe': nll_unweighted_moe,
            'unweighted_gibbs_ce': processing_results['unweighted_gibbs_ce'],
            'weighted_gibbs_ce': processing_results['weighted_gibbs_ce'],
            'weighted_probs': processing_results['weighted_probs'],
            'unweighted_probs': processing_results['unweighted_probs'],
            'weighted_logits': processing_results['weighted_logits'],
            'unweighted_logits': processing_results['unweighted_logits']
        }
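    # A small worked example of the shapes combined above (illustrative only):
    # with batch_size=2, num_experts=3 and num_classes=4, `logits` is [2, 3, 4]
    # and `routing_weights` is [2, 3]. The MoE quantities mix probabilities,
    #     weighted_probs[b] = sum_k routing_weights[b, k] * softmax(logits[b, k]),
    # while the PoE quantities combine (routing-weighted) logits and only then
    # apply the softmax / cross-entropy.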

    @tf.function
    def train_step(iterator):
        """Training StepFn."""
        def step_fn(inputs):
            """Per-Replica StepFn."""
            images = inputs['features']
            labels = inputs['labels']
            with tf.GradientTape() as tape:
                logits = model(images, training=True)
                if FLAGS.use_bfloat16:
                    logits = tf.cast(logits, tf.float32)
                # if not FLAGS.reduce_dense_outputs and FLAGS.use_cond_dense:
                if not isinstance(logits, (list, tuple)):
                    raise ValueError('Logits are not a tuple.')
                # logits is a `Tensor` of shape [batch_size, num_experts, num_classes]
                logits, all_routing_weights = logits
                # routing_weights is a `Tensor` of shape [batch_size, num_experts]
                routing_weights = all_routing_weights[-1]
                if not FLAGS.reduce_dense_outputs and FLAGS.use_cond_dense:
                    probs, negative_log_likelihood = _process_3d_logits_train(
                        logits, routing_weights, labels)
                else:
                    probs = tf.nn.softmax(logits)
                    # Prior to reduce_mean the NLLs are of the shape [batch, num_experts].
                    negative_log_likelihood = tf.reduce_mean(
                        tf.keras.losses.sparse_categorical_crossentropy(
                            labels, logits, from_logits=True))

                l2_loss = sum(model.losses)
                loss = negative_log_likelihood + l2_loss
                # Scale the loss, since the strategy will sum (reduce-sum) gradients across replicas.
                scaled_loss = loss / strategy.num_replicas_in_sync

            grads = tape.gradient(scaled_loss, model.trainable_variables)
            optimizer.apply_gradients(zip(grads, model.trainable_variables))

            metrics['train/ece'].add_batch(probs, label=labels)
            metrics['train/loss'].update_state(loss)
            metrics['train/negative_log_likelihood'].update_state(
                negative_log_likelihood)
            metrics['train/accuracy'].update_state(labels, probs)

        for _ in tf.range(tf.cast(steps_per_epoch, tf.int32)):
            strategy.run(step_fn, args=(next(iterator), ))

    @tf.function
    def test_step(iterator, dataset_split, dataset_name, num_steps):
        """Evaluation StepFn."""
        def step_fn(inputs):
            """Per-Replica StepFn."""
            images = inputs['features']
            labels = inputs['labels']
            logits = model(images, training=False)
            if FLAGS.use_bfloat16:
                logits = tf.cast(logits, tf.float32)
            if not isinstance(logits, (list, tuple)):
                raise ValueError('Logits are not a tuple.')
            # logits is a `Tensor` of shape [batch_size, num_experts, num_classes]
            # routing_weights is a `Tensor` of shape [batch_size, num_experts]
            logits, all_routing_weights = logits
            routing_weights = all_routing_weights[-1]
            if not FLAGS.reduce_dense_outputs and FLAGS.use_cond_dense:
                results = _process_3d_logits_test(routing_weights, logits,
                                                  labels)
            else:
                probs = tf.nn.softmax(logits)
                negative_log_likelihood = tf.reduce_mean(
                    tf.keras.losses.sparse_categorical_crossentropy(
                        labels, probs))

            if dataset_name == 'clean':
                if not FLAGS.reduce_dense_outputs and FLAGS.use_cond_dense:
                    metrics[f'{dataset_split}/nll_poe'].update_state(
                        results['nll_poe'])
                    metrics[f'{dataset_split}/nll_moe'].update_state(
                        results['nll_moe'])
                    metrics[
                        f'{dataset_split}/nll_unweighted_poe'].update_state(
                            results['nll_unweighted_poe'])
                    metrics[
                        f'{dataset_split}/nll_unweighted_moe'].update_state(
                            results['nll_unweighted_moe'])
                    metrics[
                        f'{dataset_split}/unweighted_gibbs_ce'].update_state(
                            results['unweighted_gibbs_ce'])
                    metrics[
                        f'{dataset_split}/negative_log_likelihood'].update_state(
                            results['weighted_gibbs_ce'])
                    metrics[f'{dataset_split}/ece'].add_batch(
                        results['weighted_probs'], label=labels)
                    metrics[f'{dataset_split}/accuracy'].update_state(
                        labels, results['weighted_probs'])
                    metrics[f'{dataset_split}/ece_unweighted_moe'].add_batch(
                        results['unweighted_probs'], label=labels)
                    metrics[
                        f'{dataset_split}/accuracy_unweighted_moe'].update_state(
                            labels, results['unweighted_probs'])
                    metrics[f'{dataset_split}/ece_poe'].add_batch(
                        results['weighted_logits'], label=labels)
                    metrics[f'{dataset_split}/accuracy_poe'].update_state(
                        labels, results['weighted_logits'])
                    metrics[f'{dataset_split}/ece_unweighted_poe'].add_batch(
                        results['unweighted_logits'], label=labels)
                    metrics[
                        f'{dataset_split}/accuracy_unweighted_poe'].update_state(
                            labels, results['unweighted_logits'])
                    # TODO(ghassen): summarize all routing weights not only last layer's.
                    average_routing_weights = tf.math.reduce_mean(
                        routing_weights, axis=0)
                    routing_weights_sum = tf.math.reduce_sum(
                        average_routing_weights)
                    for idx in range(FLAGS.num_experts):
                        metrics[
                            f'{dataset_split}/dense_routing_weight_{idx}'].update_state(
                                average_routing_weights[idx])
                        key = f'{dataset_split}/dense_routing_weight_normalized_{idx}'
                        metrics[key].update_state(
                            average_routing_weights[idx] / routing_weights_sum)
                    # TODO(ghassen): add more metrics for expert utilization,
                    # load loss and importance/balance loss.
                else:
                    metrics[
                        f'{dataset_split}/negative_log_likelihood'].update_state(
                            negative_log_likelihood)
                    metrics[f'{dataset_split}/accuracy'].update_state(
                        labels, probs)
                    metrics[f'{dataset_split}/ece'].add_batch(probs,
                                                              label=labels)
            else:
                # TODO(ghassen): figure out how to aggregate probs for the OOD case.
                if not FLAGS.reduce_dense_outputs and FLAGS.use_cond_dense:
                    corrupt_metrics['test/nll_{}'.format(
                        dataset_name)].update_state(
                            results['unweighted_gibbs_ce'])
                    corrupt_metrics['test/accuracy_{}'.format(
                        dataset_name)].update_state(
                            labels, results['unweighted_probs'])
                    corrupt_metrics['test/ece_{}'.format(
                        dataset_name)].add_batch(results['unweighted_probs'],
                                                 label=labels)

                    corrupt_metrics['test/nll_weighted_moe_{}'.format(
                        dataset_name)].update_state(
                            results['weighted_gibbs_ce'])
                    corrupt_metrics['test/accuracy_weighted_moe_{}'.format(
                        dataset_name)].update_state(labels,
                                                    results['weighted_probs'])
                    corrupt_metrics['test/ece_weighted_moe_{}'.format(
                        dataset_name)].add_batch(results['weighted_probs'],
                                                 label=labels)
                else:
                    corrupt_metrics['test/nll_{}'.format(
                        dataset_name)].update_state(negative_log_likelihood)
                    corrupt_metrics['test/accuracy_{}'.format(
                        dataset_name)].update_state(labels, probs)
                    corrupt_metrics['test/ece_{}'.format(
                        dataset_name)].add_batch(probs, label=labels)

        for _ in tf.range(tf.cast(num_steps, tf.int32)):
            strategy.run(step_fn, args=(next(iterator), ))

    metrics.update({'test/ms_per_example': tf.keras.metrics.Mean()})

    train_iterator = iter(train_dataset)
    start_time = time.time()
    for epoch in range(initial_epoch, FLAGS.train_epochs):
        logging.info('Starting to run epoch: %s', epoch)
        train_step(train_iterator)

        current_step = (epoch + 1) * steps_per_epoch
        max_steps = steps_per_epoch * FLAGS.train_epochs
        time_elapsed = time.time() - start_time
        steps_per_sec = float(current_step) / time_elapsed
        eta_seconds = (max_steps - current_step) / steps_per_sec
        message = ('{:.1%} completion: epoch {:d}/{:d}. {:.1f} steps/s. '
                   'ETA: {:.0f} min. Time elapsed: {:.0f} min'.format(
                       current_step / max_steps, epoch + 1, FLAGS.train_epochs,
                       steps_per_sec, eta_seconds / 60, time_elapsed / 60))
        logging.info(message)

        if validation_dataset:
            validation_iterator = iter(validation_dataset)
            test_step(validation_iterator, 'validation', 'clean',
                      steps_per_validation)
        datasets_to_evaluate = {'clean': test_datasets['clean']}
        if (FLAGS.corruptions_interval > 0
                and (epoch + 1) % FLAGS.corruptions_interval == 0):
            datasets_to_evaluate = test_datasets
        for dataset_name, test_dataset in datasets_to_evaluate.items():
            test_iterator = iter(test_dataset)
            logging.info('Testing on dataset %s', dataset_name)
            logging.info('Starting to run eval at epoch: %s', epoch)
            test_start_time = time.time()
            test_step(test_iterator, 'test', dataset_name, steps_per_eval)
            ms_per_example = (time.time() - test_start_time) * 1e6 / batch_size
            metrics['test/ms_per_example'].update_state(ms_per_example)

            logging.info('Done with testing on %s', dataset_name)

        corrupt_results = {}
        if (FLAGS.corruptions_interval > 0
                and (epoch + 1) % FLAGS.corruptions_interval == 0):
            corrupt_results = utils.aggregate_corrupt_metrics(
                corrupt_metrics, corruption_types, max_intensity)

        logging.info('Train Loss: %.4f, Accuracy: %.2f%%',
                     metrics['train/loss'].result(),
                     metrics['train/accuracy'].result() * 100)
        logging.info('Test NLL: %.4f, Accuracy: %.2f%%',
                     metrics['test/negative_log_likelihood'].result(),
                     metrics['test/accuracy'].result() * 100)
        total_results = {
            name: metric.result()
            for name, metric in metrics.items()
        }
        total_results.update(corrupt_results)
        # Metrics from Robustness Metrics (like ECE) will return a dict with a
        # single key/value, instead of a scalar.
        total_results = {
            k: (list(v.values())[0] if isinstance(v, dict) else v)
            for k, v in total_results.items()
        }
        with summary_writer.as_default():
            for name, result in total_results.items():
                tf.summary.scalar(name, result, step=epoch + 1)

        for metric in metrics.values():
            metric.reset_states()

        if (FLAGS.checkpoint_interval > 0
                and (epoch + 1) % FLAGS.checkpoint_interval == 0):
            checkpoint_name = checkpoint.save(
                os.path.join(FLAGS.output_dir, 'checkpoint'))
            logging.info('Saved checkpoint to %s', checkpoint_name)
    final_checkpoint_name = checkpoint.save(
        os.path.join(FLAGS.output_dir, 'checkpoint'))
    logging.info('Saved last checkpoint to %s', final_checkpoint_name)
    with summary_writer.as_default():
        hp.hparams({
            'base_learning_rate':
            FLAGS.base_learning_rate,
            'one_minus_momentum':
            FLAGS.one_minus_momentum,
            'l2':
            FLAGS.l2,
            'dropout_rate':
            FLAGS.dropout_rate,
            'num_dropout_samples':
            FLAGS.num_dropout_samples,
            'num_dropout_samples_training':
            FLAGS.num_dropout_samples_training,
        })
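
Both this script and the next one scale the optimizer settings from the vanilla CIFAR setup (batch size 128, 200 epochs). A minimal standalone sketch of that arithmetic with assumed flag values (base_learning_rate, lr_decay_epochs and train_epochs below are illustrative, not taken from the scripts):

# Assumed, illustrative flag values.
base_learning_rate = 0.1
per_core_batch_size, num_cores = 64, 8
train_epochs = 250
lr_decay_epochs = ['60', '120', '160']   # expressed in 'vanilla' 200-epoch units

batch_size = per_core_batch_size * num_cores                             # 512
base_lr = base_learning_rate * batch_size / 128                          # 0.4
decay_epochs = [int(e) * train_epochs // 200 for e in lr_decay_epochs]   # [75, 150, 200]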
Example No. 6
0
def main(argv):
    del argv  # unused arg
    tf.io.gfile.makedirs(FLAGS.output_dir)
    logging.info('Saving checkpoints at %s', FLAGS.output_dir)
    tf.random.set_seed(FLAGS.seed)
    # Split the seed into a 2-tuple, for passing into dataset builder.
    dataset_seed = (FLAGS.seed, FLAGS.seed + 1)

    data_dir = utils.get_data_dir_from_flags(FLAGS)
    if FLAGS.use_gpu:
        logging.info('Use GPU')
        strategy = tf.distribute.MirroredStrategy()
    else:
        logging.info('Use TPU at %s',
                     FLAGS.tpu if FLAGS.tpu is not None else 'local')
        resolver = tf.distribute.cluster_resolver.TPUClusterResolver(
            tpu=FLAGS.tpu)
        tf.config.experimental_connect_to_cluster(resolver)
        tf.tpu.experimental.initialize_tpu_system(resolver)
        strategy = tf.distribute.TPUStrategy(resolver)

    batch_size = (FLAGS.per_core_batch_size * FLAGS.num_cores //
                  FLAGS.num_dropout_samples_training)
    test_batch_size = FLAGS.per_core_batch_size * FLAGS.num_cores
    num_classes = 10 if FLAGS.dataset == 'cifar10' else 100

    aug_params = {
        'augmix': FLAGS.augmix,
        'aug_count': FLAGS.aug_count,
        'augmix_depth': FLAGS.augmix_depth,
        'augmix_prob_coeff': FLAGS.augmix_prob_coeff,
        'augmix_width': FLAGS.augmix_width,
        'ensemble_size': 1,
        'mixup_alpha': FLAGS.mixup_alpha,
    }
    validation_proportion = 1. - FLAGS.train_proportion
    use_validation_set = validation_proportion > 0.
    if FLAGS.dataset == 'cifar10':
        dataset_builder_class = ub.datasets.Cifar10Dataset
    else:
        dataset_builder_class = ub.datasets.Cifar100Dataset
    train_dataset_builder = dataset_builder_class(
        data_dir=data_dir,
        download_data=FLAGS.download_data,
        split=tfds.Split.TRAIN,
        use_bfloat16=FLAGS.use_bfloat16,
        aug_params=aug_params,
        validation_percent=validation_proportion,
        seed=dataset_seed)
    train_dataset = train_dataset_builder.load(batch_size=batch_size)
    train_sample_size = train_dataset_builder.num_examples
    if validation_proportion > 0.:
        validation_dataset_builder = dataset_builder_class(
            data_dir=data_dir,
            download_data=FLAGS.download_data,
            split=tfds.Split.VALIDATION,
            use_bfloat16=FLAGS.use_bfloat16,
            validation_percent=validation_proportion)
        validation_dataset = validation_dataset_builder.load(
            batch_size=batch_size)
        validation_dataset = strategy.experimental_distribute_dataset(
            validation_dataset)
        val_sample_size = validation_dataset_builder.num_examples
        steps_per_val = val_sample_size // batch_size
    clean_test_dataset_builder = dataset_builder_class(
        data_dir=data_dir,
        download_data=FLAGS.download_data,
        split=tfds.Split.TEST,
        use_bfloat16=FLAGS.use_bfloat16)
    clean_test_dataset = clean_test_dataset_builder.load(
        batch_size=test_batch_size)

    steps_per_epoch = train_dataset_builder.num_examples // batch_size
    steps_per_eval = clean_test_dataset_builder.num_examples // batch_size
    train_dataset = strategy.experimental_distribute_dataset(train_dataset)
    test_datasets = {
        'clean': strategy.experimental_distribute_dataset(clean_test_dataset),
    }
    if FLAGS.corruptions_interval > 0:
        if FLAGS.dataset == 'cifar10':
            load_c_dataset = utils.load_cifar10_c
        else:
            load_c_dataset = functools.partial(utils.load_cifar100_c,
                                               path=FLAGS.cifar100_c_path)
        corruption_types, max_intensity = utils.load_corrupted_test_info(
            FLAGS.dataset)
        for corruption in corruption_types:
            for intensity in range(1, max_intensity + 1):
                dataset = load_c_dataset(corruption_name=corruption,
                                         corruption_intensity=intensity,
                                         batch_size=test_batch_size,
                                         data_dir=data_dir,
                                         download_data=FLAGS.download_data,
                                         use_bfloat16=FLAGS.use_bfloat16)
                test_datasets['{0}_{1}'.format(corruption, intensity)] = (
                    strategy.experimental_distribute_dataset(dataset))

    if FLAGS.use_bfloat16:
        policy = tf.keras.mixed_precision.experimental.Policy('mixed_bfloat16')
        tf.keras.mixed_precision.experimental.set_policy(policy)

    summary_writer = tf.summary.create_file_writer(
        os.path.join(FLAGS.output_dir, 'summaries'))

    with strategy.scope():
        logging.info('Building ResNet model')
        if FLAGS.use_spec_norm:
            logging.info('Use Spectral Normalization with norm bound %.2f',
                         FLAGS.spec_norm_bound)
        if FLAGS.use_gp_layer:
            logging.info('Use GP layer with hidden units %d',
                         FLAGS.gp_hidden_dim)

        model = ub.models.wide_resnet_sngp(
            input_shape=(32, 32, 3),
            batch_size=batch_size,
            depth=28,
            width_multiplier=10,
            num_classes=num_classes,
            l2=FLAGS.l2,
            use_mc_dropout=FLAGS.use_mc_dropout,
            use_filterwise_dropout=FLAGS.use_filterwise_dropout,
            dropout_rate=FLAGS.dropout_rate,
            use_gp_layer=FLAGS.use_gp_layer,
            gp_input_dim=FLAGS.gp_input_dim,
            gp_hidden_dim=FLAGS.gp_hidden_dim,
            gp_scale=FLAGS.gp_scale,
            gp_bias=FLAGS.gp_bias,
            gp_input_normalization=FLAGS.gp_input_normalization,
            gp_random_feature_type=FLAGS.gp_random_feature_type,
            gp_cov_discount_factor=FLAGS.gp_cov_discount_factor,
            gp_cov_ridge_penalty=FLAGS.gp_cov_ridge_penalty,
            use_spec_norm=FLAGS.use_spec_norm,
            spec_norm_iteration=FLAGS.spec_norm_iteration,
            spec_norm_bound=FLAGS.spec_norm_bound)
        logging.info('Model input shape: %s', model.input_shape)
        logging.info('Model output shape: %s', model.output_shape)
        logging.info('Model number of weights: %s', model.count_params())
        # Linearly scale the learning rate and the decay epochs relative to the
        # vanilla setup (batch size 128, 200 epochs).
        base_lr = FLAGS.base_learning_rate * batch_size / 128
        lr_decay_epochs = [(int(start_epoch_str) * FLAGS.train_epochs) // 200
                           for start_epoch_str in FLAGS.lr_decay_epochs]
        lr_schedule = ub.schedules.WarmUpPiecewiseConstantSchedule(
            steps_per_epoch,
            base_lr,
            decay_ratio=FLAGS.lr_decay_ratio,
            decay_epochs=lr_decay_epochs,
            warmup_epochs=FLAGS.lr_warmup_epochs)
        optimizer = tf.keras.optimizers.SGD(lr_schedule,
                                            momentum=1.0 -
                                            FLAGS.one_minus_momentum,
                                            nesterov=True)
        metrics = {
            'train/negative_log_likelihood':
            tf.keras.metrics.Mean(),
            'train/accuracy':
            tf.keras.metrics.SparseCategoricalAccuracy(),
            'train/loss':
            tf.keras.metrics.Mean(),
            'train/ece':
            rm.metrics.ExpectedCalibrationError(num_bins=FLAGS.num_bins),
            'test/negative_log_likelihood':
            tf.keras.metrics.Mean(),
            'test/accuracy':
            tf.keras.metrics.SparseCategoricalAccuracy(),
            'test/ece':
            rm.metrics.ExpectedCalibrationError(num_bins=FLAGS.num_bins),
            'test/stddev':
            tf.keras.metrics.Mean(),
        }
        if use_validation_set:
            metrics.update({
                'val/negative_log_likelihood':
                tf.keras.metrics.Mean(),
                'val/accuracy':
                tf.keras.metrics.SparseCategoricalAccuracy(),
                'val/ece':
                rm.metrics.ExpectedCalibrationError(num_bins=FLAGS.num_bins),
                'val/stddev':
                tf.keras.metrics.Mean(),
            })
        if FLAGS.corruptions_interval > 0:
            corrupt_metrics = {}
            for intensity in range(1, max_intensity + 1):
                for corruption in corruption_types:
                    dataset_name = '{0}_{1}'.format(corruption, intensity)
                    corrupt_metrics['test/nll_{}'.format(dataset_name)] = (
                        tf.keras.metrics.Mean())
                    corrupt_metrics['test/accuracy_{}'.format(
                        dataset_name)] = (
                            tf.keras.metrics.SparseCategoricalAccuracy())
                    corrupt_metrics['test/ece_{}'.format(dataset_name)] = (
                        rm.metrics.ExpectedCalibrationError(
                            num_bins=FLAGS.num_bins))
                    corrupt_metrics['test/stddev_{}'.format(dataset_name)] = (
                        tf.keras.metrics.Mean())

        checkpoint = tf.train.Checkpoint(model=model, optimizer=optimizer)
        latest_checkpoint = tf.train.latest_checkpoint(FLAGS.output_dir)
        initial_epoch = 0
        if latest_checkpoint:
            # checkpoint.restore must be within a strategy.scope() so that optimizer
            # slot variables are mirrored.
            checkpoint.restore(latest_checkpoint)
            logging.info('Loaded checkpoint %s', latest_checkpoint)
            initial_epoch = optimizer.iterations.numpy() // steps_per_epoch

    @tf.function
    def train_step(iterator, step):
        """Training StepFn."""
        def step_fn(inputs, step):
            """Per-Replica StepFn."""
            images = inputs['features']
            labels = inputs['labels']

            if tf.equal(step, 0) and FLAGS.gp_cov_discount_factor < 0:
                # Reset the covariance estimator at the beginning of a new epoch.
                model.layers[-1].reset_covariance_matrix()

            if FLAGS.augmix and FLAGS.aug_count >= 1:
                # Index 0 at augmix preprocessing is the unperturbed image.
                images = images[:, 1, ...]
                # This is for the case of combining AugMix and Mixup.
                if FLAGS.mixup_alpha > 0:
                    labels = tf.split(labels, FLAGS.aug_count + 1, axis=0)[1]
            images = tf.tile(images,
                             [FLAGS.num_dropout_samples_training, 1, 1, 1])
            if FLAGS.mixup_alpha > 0:
                labels = tf.tile(labels,
                                 [FLAGS.num_dropout_samples_training, 1])
            else:
                labels = tf.tile(labels, [FLAGS.num_dropout_samples_training])
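            # The tiling above repeats each image (and label) once per training-time
            # dropout sample, so a single forward pass evaluates all dropout draws.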

            with tf.GradientTape() as tape:
                logits = model(images, training=True)
                if isinstance(logits, (list, tuple)):
                    # If model returns a tuple of (logits, covmat), extract logits
                    logits, _ = logits
                if FLAGS.use_bfloat16:
                    logits = tf.cast(logits, tf.float32)
                if FLAGS.mixup_alpha > 0:
                    negative_log_likelihood = tf.reduce_mean(
                        tf.keras.losses.categorical_crossentropy(
                            labels, logits, from_logits=True))
                else:
                    negative_log_likelihood = tf.reduce_mean(
                        tf.keras.losses.sparse_categorical_crossentropy(
                            labels, logits, from_logits=True))

                l2_loss = sum(model.losses)
                loss = negative_log_likelihood + l2_loss
                # Scale the loss, since the strategy will sum (reduce-sum) gradients across replicas.
                scaled_loss = loss / strategy.num_replicas_in_sync

            grads = tape.gradient(scaled_loss, model.trainable_variables)
            optimizer.apply_gradients(zip(grads, model.trainable_variables))

            probs = tf.nn.softmax(logits)
            if FLAGS.mixup_alpha > 0:
                labels = tf.argmax(labels, axis=-1)
            metrics['train/ece'].add_batch(probs, label=labels)
            metrics['train/loss'].update_state(loss)
            metrics['train/negative_log_likelihood'].update_state(
                negative_log_likelihood)
            metrics['train/accuracy'].update_state(labels, logits)

        strategy.run(step_fn, args=(next(iterator), step))

    @tf.function
    def test_step(iterator, dataset_name):
        """Evaluation StepFn."""
        def step_fn(inputs):
            """Per-Replica StepFn."""
            images = inputs['features']
            labels = inputs['labels']

            logits_list = []
            stddev_list = []
            for _ in range(FLAGS.num_dropout_samples):
                logits = model(images, training=False)
                if isinstance(logits, (list, tuple)):
                    # If model returns a tuple of (logits, covmat), extract both
                    logits, covmat = logits
                else:
                    covmat = tf.eye(FLAGS.per_core_batch_size)
                if FLAGS.use_bfloat16:
                    logits = tf.cast(logits, tf.float32)
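                # Mean-field (probit) adjustment: scale the logits by the GP
                # predictive variance, logits / sqrt(1 + factor * diag(covmat)),
                # so the softmax of the adjusted logits approximates the
                # posterior predictive mean.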
                logits = ed.layers.utils.mean_field_logits(
                    logits,
                    covmat,
                    mean_field_factor=FLAGS.gp_mean_field_factor)
                stddev = tf.sqrt(tf.linalg.diag_part(covmat))

                stddev_list.append(stddev)
                logits_list.append(logits)

            # Logits dimension is (num_samples, batch_size, num_classes).
            logits_list = tf.stack(logits_list, axis=0)
            stddev_list = tf.stack(stddev_list, axis=0)

            stddev = tf.reduce_mean(stddev_list, axis=0)
            probs_list = tf.nn.softmax(logits_list)
            probs = tf.reduce_mean(probs_list, axis=0)

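            # Monte Carlo estimate of the marginal likelihood over dropout
            # samples: log p(y|x) ~ logsumexp_s log p(y|x, s) - log(S); the NLL
            # below is its negation averaged over the batch.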
            labels_broadcasted = tf.broadcast_to(
                labels, [FLAGS.num_dropout_samples, labels.shape[0]])
            log_likelihoods = -tf.keras.losses.sparse_categorical_crossentropy(
                labels_broadcasted, logits_list, from_logits=True)
            negative_log_likelihood = tf.reduce_mean(
                -tf.reduce_logsumexp(log_likelihoods, axis=[0]) +
                tf.math.log(float(FLAGS.num_dropout_samples)))

            if dataset_name == 'clean':
                metrics['test/negative_log_likelihood'].update_state(
                    negative_log_likelihood)
                metrics['test/accuracy'].update_state(labels, probs)
                metrics['test/ece'].add_batch(probs, label=labels)
                metrics['test/stddev'].update_state(stddev)
            elif dataset_name == 'val':
                metrics['val/negative_log_likelihood'].update_state(
                    negative_log_likelihood)
                metrics['val/accuracy'].update_state(labels, probs)
                metrics['val/ece'].add_batch(probs, label=labels)
                metrics['val/stddev'].update_state(stddev)
            else:
                corrupt_metrics['test/nll_{}'.format(
                    dataset_name)].update_state(negative_log_likelihood)
                corrupt_metrics['test/accuracy_{}'.format(
                    dataset_name)].update_state(labels, probs)
                corrupt_metrics['test/ece_{}'.format(dataset_name)].add_batch(
                    probs, label=labels)
                corrupt_metrics['test/stddev_{}'.format(
                    dataset_name)].update_state(stddev)

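        # `tf.range` keeps the evaluation loop inside the traced function, so
        # the per-step strategy.run calls compile into a single graph loop
        # rather than being unrolled in Python.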
        for _ in tf.range(tf.cast(steps_per_eval, tf.int32)):
            strategy.run(step_fn, args=(next(iterator), ))

    metrics.update({'test/ms_per_example': tf.keras.metrics.Mean()})

    step_variable = tf.Variable(0, dtype=tf.int32)
    train_iterator = iter(train_dataset)
    start_time = time.time()

    for epoch in range(initial_epoch, FLAGS.train_epochs):
        logging.info('Starting to run epoch: %s', epoch)
        for step in range(steps_per_epoch):
            step_variable.assign(step)
            # Pass `step` as a tf.Variable so the tf.function train_step() does
            # not retrace at every call.
            train_step(train_iterator, step_variable)

            current_step = epoch * steps_per_epoch + (step + 1)
            max_steps = steps_per_epoch * FLAGS.train_epochs
            time_elapsed = time.time() - start_time
            steps_per_sec = float(current_step) / time_elapsed
            eta_seconds = (max_steps - current_step) / steps_per_sec
            message = ('{:.1%} completion: epoch {:d}/{:d}. {:.1f} steps/s. '
                       'ETA: {:.0f} min. Time elapsed: {:.0f} min'.format(
                           current_step / max_steps, epoch + 1,
                           FLAGS.train_epochs, steps_per_sec, eta_seconds / 60,
                           time_elapsed / 60))
            if step % 20 == 0:
                logging.info(message)

        datasets_to_evaluate = {'clean': test_datasets['clean']}
        if use_validation_set:
            datasets_to_evaluate['val'] = validation_dataset
        if (FLAGS.corruptions_interval > 0
                and (epoch + 1) % FLAGS.corruptions_interval == 0):
            datasets_to_evaluate = test_datasets
        for dataset_name, test_dataset in datasets_to_evaluate.items():
            test_iterator = iter(test_dataset)
            logging.info('Testing on dataset %s', dataset_name)
            steps_per_eval = (steps_per_validation
                              if dataset_name == 'val' else steps_per_eval)
            logging.info('Starting to run eval at epoch: %s', epoch)
            test_start_time = time.time()
            test_step(test_iterator, dataset_name)
            ms_per_example = (time.time() - test_start_time) * 1e6 / batch_size
            metrics['test/ms_per_example'].update_state(ms_per_example)

            logging.info('Done with testing on %s', dataset_name)

        corrupt_results = {}
        if (FLAGS.corruptions_interval > 0
                and (epoch + 1) % FLAGS.corruptions_interval == 0):
            corrupt_results = utils.aggregate_corrupt_metrics(
                corrupt_metrics, corruption_types, max_intensity)

        logging.info('Train Loss: %.4f, Accuracy: %.2f%%',
                     metrics['train/loss'].result(),
                     metrics['train/accuracy'].result() * 100)
        if use_validation_set:
            logging.info('Val NLL: %.4f, Accuracy: %.2f%%',
                         metrics['val/negative_log_likelihood'].result(),
                         metrics['val/accuracy'].result() * 100)
        logging.info('Test NLL: %.4f, Accuracy: %.2f%%',
                     metrics['test/negative_log_likelihood'].result(),
                     metrics['test/accuracy'].result() * 100)
        total_results = {
            name: metric.result()
            for name, metric in metrics.items()
        }
        total_results.update(corrupt_results)
        # Metrics from Robustness Metrics (like ECE) will return a dict with a
        # single key/value, instead of a scalar.
        total_results = {
            k: (list(v.values())[0] if isinstance(v, dict) else v)
            for k, v in total_results.items()
        }
        with summary_writer.as_default():
            for name, result in total_results.items():
                tf.summary.scalar(name, result, step=epoch + 1)

        for metric in metrics.values():
            metric.reset_states()

        if (FLAGS.checkpoint_interval > 0
                and (epoch + 1) % FLAGS.checkpoint_interval == 0):
            checkpoint_name = checkpoint.save(
                os.path.join(FLAGS.output_dir, 'checkpoint'))
            logging.info('Saved checkpoint to %s', checkpoint_name)

    final_checkpoint_name = checkpoint.save(
        os.path.join(FLAGS.output_dir, 'checkpoint'))
    logging.info('Saved last checkpoint to %s', final_checkpoint_name)

    final_save_name = os.path.join(FLAGS.output_dir, 'model')
    model.save(final_save_name)
    logging.info('Saved model to %s', final_save_name)
    with summary_writer.as_default():
        hp.hparams({
            'base_learning_rate': FLAGS.base_learning_rate,
            'one_minus_momentum': FLAGS.one_minus_momentum,
            'l2': FLAGS.l2,
            'gp_mean_field_factor': FLAGS.gp_mean_field_factor,
        })


def main(argv):
    del argv  # unused arg
    if not FLAGS.use_gpu:
        raise ValueError('Only GPU is currently supported.')
    if FLAGS.num_cores > 1:
        raise ValueError('Only a single accelerator is currently supported.')
    tf.random.set_seed(FLAGS.seed)
    tf.io.gfile.makedirs(FLAGS.output_dir)

    ds_info = tfds.builder(FLAGS.dataset).info
    batch_size = FLAGS.per_core_batch_size * FLAGS.num_cores
    steps_per_eval = ds_info.splits['test'].num_examples // batch_size
    num_classes = ds_info.features['label'].num_classes

    data_dir = utils.get_data_dir_from_flags(FLAGS)
    dataset = ub.datasets.get(
        FLAGS.dataset,
        data_dir=data_dir,
        download_data=FLAGS.download_data,
        split=tfds.Split.TEST).load(batch_size=batch_size)
    test_datasets = {'clean': dataset}
    extra_kwargs = {}
    if FLAGS.dataset == 'cifar100':
        data_dir = FLAGS.cifar100_c_path
    corruption_types, _ = utils.load_corrupted_test_info(FLAGS.dataset)
    for corruption_type in corruption_types:
        for severity in range(1, 6):
            dataset = ub.datasets.get(
                f'{FLAGS.dataset}_corrupted',
                corruption_type=corruption_type,
                data_dir=data_dir,
                severity=severity,
                split=tfds.Split.TEST,
                **extra_kwargs).load(batch_size=batch_size)
            test_datasets[f'{corruption_type}_{severity}'] = dataset

    model = ub.models.wide_resnet_sngp(
        input_shape=ds_info.features['image'].shape,
        batch_size=FLAGS.per_core_batch_size,
        depth=28,
        width_multiplier=10,
        num_classes=num_classes,
        l2=0.,
        use_mc_dropout=FLAGS.use_mc_dropout,
        use_filterwise_dropout=FLAGS.use_filterwise_dropout,
        dropout_rate=FLAGS.dropout_rate,
        use_gp_layer=FLAGS.use_gp_layer,
        gp_input_dim=FLAGS.gp_input_dim,
        gp_hidden_dim=FLAGS.gp_hidden_dim,
        gp_scale=FLAGS.gp_scale,
        gp_bias=FLAGS.gp_bias,
        gp_input_normalization=FLAGS.gp_input_normalization,
        gp_random_feature_type=FLAGS.gp_random_feature_type,
        gp_cov_discount_factor=FLAGS.gp_cov_discount_factor,
        gp_cov_ridge_penalty=FLAGS.gp_cov_ridge_penalty,
        use_spec_norm=FLAGS.use_spec_norm,
        spec_norm_iteration=FLAGS.spec_norm_iteration,
        spec_norm_bound=FLAGS.spec_norm_bound)
    logging.info('Model input shape: %s', model.input_shape)
    logging.info('Model output shape: %s', model.output_shape)
    logging.info('Model number of weights: %s', model.count_params())

    # Search for checkpoints from their index file; then remove the index suffix.
    ensemble_filenames = tf.io.gfile.glob(
        os.path.join(FLAGS.checkpoint_dir, '**/*.index'))
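    # Strip the 6-character '.index' suffix to recover the checkpoint prefix
    # expected by checkpoint.restore().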
    ensemble_filenames = [filename[:-6] for filename in ensemble_filenames]
    ensemble_size = len(ensemble_filenames)
    logging.info('Ensemble size: %s', ensemble_size)
    logging.info('Ensemble number of weights: %s',
                 ensemble_size * model.count_params())
    logging.info('Ensemble filenames: %s', str(ensemble_filenames))
    checkpoint = tf.train.Checkpoint(model=model)

    # Write model predictions to files.
    num_datasets = len(test_datasets)
    for m, ensemble_filename in enumerate(ensemble_filenames):
        checkpoint.restore(ensemble_filename)
        for n, (name, test_dataset) in enumerate(test_datasets.items()):
            filename = '{dataset}_{member}.npy'.format(dataset=name, member=m)
            filename = os.path.join(FLAGS.output_dir, filename)
            if not tf.io.gfile.exists(filename):
                logits = []
                test_iterator = iter(test_dataset)
                for _ in range(steps_per_eval):
                    features = next(test_iterator)['features']  # pytype: disable=unsupported-operands
                    logits_member = model(features, training=False)
                    if isinstance(logits_member, (list, tuple)):
                        # If model returns a tuple of (logits, covmat), extract both
                        logits_member, covmat_member = logits_member
                    else:
                        covmat_member = tf.eye(FLAGS.per_core_batch_size)
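                    # Apply the same mean-field logit scaling used at evaluation
                    # time so each member's logits account for its predictive
                    # variance.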
                    logits_member = ed.layers.utils.mean_field_logits(
                        logits_member, covmat_member,
                        FLAGS.gp_mean_field_factor_ensemble)
                    logits.append(logits_member)

                logits = tf.concat(logits, axis=0)
                with tf.io.gfile.GFile(filename, 'w') as f:
                    np.save(f, logits.numpy())
            percent = (m * num_datasets +
                       (n + 1)) / (ensemble_size * num_datasets)
            message = (
                '{:.1%} completion for prediction: ensemble member {:d}/{:d}. '
                'Dataset {:d}/{:d}'.format(percent, m + 1, ensemble_size,
                                           n + 1, num_datasets))
            logging.info(message)

    metrics = {
        'test/negative_log_likelihood': tf.keras.metrics.Mean(),
        'test/gibbs_cross_entropy': tf.keras.metrics.Mean(),
        'test/accuracy': tf.keras.metrics.SparseCategoricalAccuracy(),
        'test/ece': rm.metrics.ExpectedCalibrationError(
            num_bins=FLAGS.num_bins),
    }
    corrupt_metrics = {}
    for name in test_datasets:
        corrupt_metrics['test/nll_{}'.format(name)] = tf.keras.metrics.Mean()
        corrupt_metrics['test/accuracy_{}'.format(name)] = (
            tf.keras.metrics.SparseCategoricalAccuracy())
        corrupt_metrics['test/ece_{}'.format(name)] = (
            rm.metrics.ExpectedCalibrationError(num_bins=FLAGS.num_bins))

    # Evaluate model predictions.
    for n, (name, test_dataset) in enumerate(test_datasets.items()):
        logits_dataset = []
        for m in range(ensemble_size):
            filename = '{dataset}_{member}.npy'.format(dataset=name, member=m)
            filename = os.path.join(FLAGS.output_dir, filename)
            with tf.io.gfile.GFile(filename, 'rb') as f:
                logits_dataset.append(np.load(f))

        logits_dataset = tf.convert_to_tensor(logits_dataset)
        test_iterator = iter(test_dataset)
        for step in range(steps_per_eval):
            labels = next(test_iterator)['labels']  # pytype: disable=unsupported-operands
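            # Slice this step's batch out of the precomputed logits; the result
            # has shape (ensemble_size, batch_size, num_classes).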
            logits = logits_dataset[:, (step * batch_size):((step + 1) *
                                                            batch_size)]
            labels = tf.cast(labels, tf.int32)
            negative_log_likelihood_metric = rm.metrics.EnsembleCrossEntropy()
            negative_log_likelihood_metric.add_batch(logits, labels=labels)
            negative_log_likelihood = list(
                negative_log_likelihood_metric.result().values())[0]
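            # Ensemble prediction: average the per-member softmax probabilities.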
            per_probs = tf.nn.softmax(logits)
            probs = tf.reduce_mean(per_probs, axis=0)
            if name == 'clean':
                gibbs_ce_metric = rm.metrics.GibbsCrossEntropy()
                gibbs_ce_metric.add_batch(logits, labels=labels)
                gibbs_ce = list(gibbs_ce_metric.result().values())[0]
                metrics['test/negative_log_likelihood'].update_state(
                    negative_log_likelihood)
                metrics['test/gibbs_cross_entropy'].update_state(gibbs_ce)
                metrics['test/accuracy'].update_state(labels, probs)
                metrics['test/ece'].add_batch(probs, label=labels)
            else:
                corrupt_metrics['test/nll_{}'.format(name)].update_state(
                    negative_log_likelihood)
                corrupt_metrics['test/accuracy_{}'.format(name)].update_state(
                    labels, probs)
                corrupt_metrics['test/ece_{}'.format(name)].add_batch(
                    probs, label=labels)

        message = (
            '{:.1%} completion for evaluation: dataset {:d}/{:d}'.format(
                (n + 1) / num_datasets, n + 1, num_datasets))
        logging.info(message)

    corrupt_results = utils.aggregate_corrupt_metrics(corrupt_metrics,
                                                      corruption_types)
    total_results = {name: metric.result() for name, metric in metrics.items()}
    total_results.update(corrupt_results)
    # Metrics from Robustness Metrics (like ECE) will return a dict with a
    # single key/value, instead of a scalar.
    total_results = {
        k: (list(v.values())[0] if isinstance(v, dict) else v)
        for k, v in total_results.items()
    }
    logging.info('Metrics: %s', total_results)