def main(argv):
  del argv  # unused arg
  tf.io.gfile.makedirs(FLAGS.output_dir)
  logging.info('Saving checkpoints at %s', FLAGS.output_dir)
  tf.random.set_seed(FLAGS.seed)

  batch_size = FLAGS.per_core_batch_size * FLAGS.num_cores
  steps_per_epoch = APPROX_IMAGENET_TRAIN_IMAGES // batch_size
  steps_per_eval = IMAGENET_VALIDATION_IMAGES // batch_size

  if FLAGS.use_gpu:
    logging.info('Use GPU')
    strategy = tf.distribute.MirroredStrategy()
  else:
    logging.info('Use TPU at %s',
                 FLAGS.tpu if FLAGS.tpu is not None else 'local')
    resolver = tf.distribute.cluster_resolver.TPUClusterResolver(tpu=FLAGS.tpu)
    tf.config.experimental_connect_to_cluster(resolver)
    tf.tpu.experimental.initialize_tpu_system(resolver)
    strategy = tf.distribute.experimental.TPUStrategy(resolver)

  imagenet_train = utils.ImageNetInput(
      is_training=True,
      data_dir=FLAGS.data_dir,
      batch_size=FLAGS.per_core_batch_size,
      use_bfloat16=FLAGS.use_bfloat16)
  imagenet_eval = utils.ImageNetInput(
      is_training=False,
      data_dir=FLAGS.data_dir,
      batch_size=FLAGS.per_core_batch_size,
      use_bfloat16=FLAGS.use_bfloat16)
  test_datasets = {
      'clean': strategy.experimental_distribute_datasets_from_function(
          imagenet_eval.input_fn)
  }
  if FLAGS.corruptions_interval > 0:
    corruption_types, max_intensity = utils.load_corrupted_test_info()
    for name in corruption_types:
      for intensity in range(1, max_intensity + 1):
        dataset_name = '{0}_{1}'.format(name, intensity)
        corrupt_input_fn = utils.corrupt_test_input_fn(
            batch_size=FLAGS.per_core_batch_size,
            corruption_name=name,
            corruption_intensity=intensity,
            use_bfloat16=FLAGS.use_bfloat16)
        test_datasets[dataset_name] = (
            strategy.experimental_distribute_datasets_from_function(
                corrupt_input_fn))
  train_dataset = strategy.experimental_distribute_datasets_from_function(
      imagenet_train.input_fn)

  if FLAGS.use_bfloat16:
    policy = tf.keras.mixed_precision.experimental.Policy('mixed_bfloat16')
    tf.keras.mixed_precision.experimental.set_policy(policy)

  with strategy.scope():
    logging.info('Building Keras ResNet-50 model')
    model = ub.models.resnet50_sngp(
        input_shape=(224, 224, 3),
        batch_size=None,
        num_classes=NUM_CLASSES,
        use_mc_dropout=FLAGS.use_mc_dropout,
        dropout_rate=FLAGS.dropout_rate,
        filterwise_dropout=FLAGS.filterwise_dropout,
        use_gp_layer=FLAGS.use_gp_layer,
        gp_hidden_dim=FLAGS.gp_hidden_dim,
        gp_scale=FLAGS.gp_scale,
        gp_bias=FLAGS.gp_bias,
        gp_input_normalization=FLAGS.gp_input_normalization,
        gp_cov_discount_factor=FLAGS.gp_cov_discount_factor,
        gp_cov_ridge_penalty=FLAGS.gp_cov_ridge_penalty,
        gp_output_imagenet_initializer=FLAGS.gp_output_imagenet_initializer,
        use_spec_norm=FLAGS.use_spec_norm,
        spec_norm_iteration=FLAGS.spec_norm_iteration,
        spec_norm_bound=FLAGS.spec_norm_bound)
    logging.info('Model input shape: %s', model.input_shape)
    logging.info('Model output shape: %s', model.output_shape)
    logging.info('Model number of weights: %s', model.count_params())
    # Scale learning rate and decay epochs by vanilla settings.
    base_lr = FLAGS.base_learning_rate * batch_size / 256
    learning_rate = utils.LearningRateSchedule(steps_per_epoch,
                                               base_lr,
                                               FLAGS.train_epochs,
                                               _LR_SCHEDULE)
    optimizer = tf.keras.optimizers.SGD(learning_rate=learning_rate,
                                        momentum=0.9,
                                        nesterov=True)
    metrics = {
        'train/negative_log_likelihood': tf.keras.metrics.Mean(),
        'train/accuracy': tf.keras.metrics.SparseCategoricalAccuracy(),
        'train/loss': tf.keras.metrics.Mean(),
        'train/ece': um.ExpectedCalibrationError(num_bins=FLAGS.num_bins),
        'test/negative_log_likelihood': tf.keras.metrics.Mean(),
        'test/accuracy': tf.keras.metrics.SparseCategoricalAccuracy(),
        'test/ece': um.ExpectedCalibrationError(num_bins=FLAGS.num_bins),
        'test/stddev': tf.keras.metrics.Mean(),
    }
    if FLAGS.corruptions_interval > 0:
      corrupt_metrics = {}
      for intensity in range(1, max_intensity + 1):
        for corruption in corruption_types:
          dataset_name = '{0}_{1}'.format(corruption, intensity)
          corrupt_metrics['test/nll_{}'.format(dataset_name)] = (
              tf.keras.metrics.Mean())
          corrupt_metrics['test/accuracy_{}'.format(dataset_name)] = (
              tf.keras.metrics.SparseCategoricalAccuracy())
          corrupt_metrics['test/ece_{}'.format(dataset_name)] = (
              um.ExpectedCalibrationError(num_bins=FLAGS.num_bins))
          corrupt_metrics['test/stddev_{}'.format(dataset_name)] = (
              tf.keras.metrics.Mean())
    logging.info('Finished building Keras ResNet-50 model')

    checkpoint = tf.train.Checkpoint(model=model, optimizer=optimizer)
    latest_checkpoint = tf.train.latest_checkpoint(FLAGS.output_dir)
    initial_epoch = 0
    if latest_checkpoint:
      # checkpoint.restore must be within a strategy.scope() so that optimizer
      # slot variables are mirrored.
      checkpoint.restore(latest_checkpoint)
      logging.info('Loaded checkpoint %s', latest_checkpoint)
      initial_epoch = optimizer.iterations.numpy() // steps_per_epoch

  summary_writer = tf.summary.create_file_writer(
      os.path.join(FLAGS.output_dir, 'summaries'))

  @tf.function
  def train_step(iterator):
    """Training StepFn."""

    def step_fn(inputs):
      """Per-Replica StepFn."""
      images, labels = inputs
      with tf.GradientTape() as tape:
        logits = model(images, training=True)
        if isinstance(logits, tuple):
          # If the model returns a tuple of (logits, covmat), extract logits.
          logits, _ = logits
        if FLAGS.use_bfloat16:
          logits = tf.cast(logits, tf.float32)
        negative_log_likelihood = tf.reduce_mean(
            tf.keras.losses.sparse_categorical_crossentropy(
                labels, logits, from_logits=True))
        filtered_variables = []
        for var in model.trainable_variables:
          # Apply l2 on the weights. This excludes BN parameters and biases,
          # but pay attention to their naming scheme.
          if 'kernel' in var.name or 'bias' in var.name:
            filtered_variables.append(tf.reshape(var, (-1,)))
        l2_loss = FLAGS.l2 * 2 * tf.nn.l2_loss(
            tf.concat(filtered_variables, axis=0))
        # Scale the loss, given that the TPUStrategy will reduce-sum all
        # gradients.
        loss = negative_log_likelihood + l2_loss
        scaled_loss = loss / strategy.num_replicas_in_sync

      grads = tape.gradient(scaled_loss, model.trainable_variables)
      optimizer.apply_gradients(zip(grads, model.trainable_variables))

      probs = tf.nn.softmax(logits)
      metrics['train/ece'].update_state(labels, probs)
      metrics['train/loss'].update_state(loss)
      metrics['train/negative_log_likelihood'].update_state(
          negative_log_likelihood)
      metrics['train/accuracy'].update_state(labels, logits)

    strategy.run(step_fn, args=(next(iterator),))

  @tf.function
  def test_step(iterator, dataset_name):
    """Evaluation StepFn."""

    def step_fn(inputs):
      """Per-Replica StepFn."""
      images, labels = inputs
      logits_list = []
      stddev_list = []
      for _ in range(FLAGS.num_dropout_samples):
        logits = model(images, training=False)
        if isinstance(logits, tuple):
          # If the model returns a tuple of (logits, covmat), extract both.
          logits, covmat = logits
        else:
          covmat = tf.eye(FLAGS.per_core_batch_size)
        if FLAGS.use_bfloat16:
          logits = tf.cast(logits, tf.float32)
        logits = ed.layers.utils.mean_field_logits(
            logits, covmat, mean_field_factor=FLAGS.gp_mean_field_factor)
        stddev = tf.sqrt(tf.linalg.diag_part(covmat))
        stddev_list.append(stddev)
        logits_list.append(logits)

      # Logits dimension is (num_samples, batch_size, num_classes).
      logits_list = tf.stack(logits_list, axis=0)
      stddev_list = tf.stack(stddev_list, axis=0)

      stddev = tf.reduce_mean(stddev_list, axis=0)
      probs_list = tf.nn.softmax(logits_list)
      probs = tf.reduce_mean(probs_list, axis=0)

      labels_broadcasted = tf.broadcast_to(
          labels, [FLAGS.num_dropout_samples, labels.shape[0]])
      log_likelihoods = -tf.keras.losses.sparse_categorical_crossentropy(
          labels_broadcasted, logits_list, from_logits=True)
      negative_log_likelihood = tf.reduce_mean(
          -tf.reduce_logsumexp(log_likelihoods, axis=[0]) +
          tf.math.log(float(FLAGS.num_dropout_samples)))

      if dataset_name == 'clean':
        metrics['test/negative_log_likelihood'].update_state(
            negative_log_likelihood)
        metrics['test/accuracy'].update_state(labels, probs)
        metrics['test/ece'].update_state(labels, probs)
        metrics['test/stddev'].update_state(stddev)
      else:
        corrupt_metrics['test/nll_{}'.format(dataset_name)].update_state(
            negative_log_likelihood)
        corrupt_metrics['test/accuracy_{}'.format(dataset_name)].update_state(
            labels, probs)
        corrupt_metrics['test/ece_{}'.format(dataset_name)].update_state(
            labels, probs)
        corrupt_metrics['test/stddev_{}'.format(dataset_name)].update_state(
            stddev)

    strategy.run(step_fn, args=(next(iterator),))

  metrics.update({'test/ms_per_example': tf.keras.metrics.Mean()})

  train_iterator = iter(train_dataset)
  start_time = time.time()
  for epoch in range(initial_epoch, FLAGS.train_epochs):
    logging.info('Starting to run epoch: %s', epoch)
    for step in range(steps_per_epoch):
      train_step(train_iterator)

      current_step = epoch * steps_per_epoch + (step + 1)
      max_steps = steps_per_epoch * FLAGS.train_epochs
      time_elapsed = time.time() - start_time
      steps_per_sec = float(current_step) / time_elapsed
      eta_seconds = (max_steps - current_step) / steps_per_sec
      message = ('{:.1%} completion: epoch {:d}/{:d}. {:.1f} steps/s. '
                 'ETA: {:.0f} min. Time elapsed: {:.0f} min'.format(
                     current_step / max_steps, epoch + 1, FLAGS.train_epochs,
                     steps_per_sec, eta_seconds / 60, time_elapsed / 60))
      if step % 20 == 0:
        logging.info(message)

    datasets_to_evaluate = {'clean': test_datasets['clean']}
    if (FLAGS.corruptions_interval > 0 and
        (epoch + 1) % FLAGS.corruptions_interval == 0):
      datasets_to_evaluate = test_datasets
    for dataset_name, test_dataset in datasets_to_evaluate.items():
      test_iterator = iter(test_dataset)
      logging.info('Testing on dataset %s', dataset_name)
      for step in range(steps_per_eval):
        if step % 20 == 0:
          logging.info('Starting to run eval step %s of epoch: %s', step,
                       epoch)
        test_start_time = time.time()
        test_step(test_iterator, dataset_name)
        ms_per_example = (time.time() - test_start_time) * 1e6 / batch_size
        metrics['test/ms_per_example'].update_state(ms_per_example)
      logging.info('Done with testing on %s', dataset_name)

    corrupt_results = {}
    if (FLAGS.corruptions_interval > 0 and
        (epoch + 1) % FLAGS.corruptions_interval == 0):
      corrupt_results = utils.aggregate_corrupt_metrics(
          corrupt_metrics, corruption_types, max_intensity,
          FLAGS.alexnet_errors_path)
    logging.info('Train Loss: %.4f, Accuracy: %.2f%%',
                 metrics['train/loss'].result(),
                 metrics['train/accuracy'].result() * 100)
    logging.info('Test NLL: %.4f, Accuracy: %.2f%%',
                 metrics['test/negative_log_likelihood'].result(),
                 metrics['test/accuracy'].result() * 100)
    total_results = {name: metric.result() for name, metric in metrics.items()}
    total_results.update(corrupt_results)
    with summary_writer.as_default():
      for name, result in total_results.items():
        tf.summary.scalar(name, result, step=epoch + 1)

    for metric in metrics.values():
      metric.reset_states()

    if (FLAGS.checkpoint_interval > 0 and
        (epoch + 1) % FLAGS.checkpoint_interval == 0):
      checkpoint_name = checkpoint.save(
          os.path.join(FLAGS.output_dir, 'checkpoint'))
      logging.info('Saved checkpoint to %s', checkpoint_name)

  # Save final checkpoint.
  final_checkpoint_name = checkpoint.save(
      os.path.join(FLAGS.output_dir, 'checkpoint'))
  logging.info('Saved last checkpoint to %s', final_checkpoint_name)

  # Export final model as SavedModel.
  final_save_name = os.path.join(FLAGS.output_dir, 'model')
  model.save(final_save_name)
  logging.info('Saved model to %s', final_save_name)
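
# A minimal sketch (not part of the training script above) of what
# `ed.layers.utils.mean_field_logits` computes in test_step: each example's
# logits are shrunk by 1 / sqrt(1 + lambda * var), where var is the diagonal
# of the GP posterior covariance and lambda is the mean-field factor. The
# helper name is illustrative only.
def _mean_field_logits_sketch(logits, covmat, mean_field_factor=1.0):
  variances = tf.linalg.diag_part(covmat)  # per-example predictive variance
  scale = tf.sqrt(1. + mean_field_factor * variances)
  # Broadcast the per-example scale over the class dimension.
  return logits / scale[:, tf.newaxis]
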
def main(argv):
  del argv  # unused arg
  tf.io.gfile.makedirs(FLAGS.output_dir)
  logging.info('Saving checkpoints at %s', FLAGS.output_dir)
  tf.random.set_seed(FLAGS.seed)

  batch_size = FLAGS.per_core_batch_size * FLAGS.num_cores
  steps_per_epoch = APPROX_IMAGENET_TRAIN_IMAGES // batch_size
  steps_per_eval = IMAGENET_VALIDATION_IMAGES // batch_size

  if FLAGS.use_gpu:
    logging.info('Use GPU')
    strategy = tf.distribute.MirroredStrategy()
  else:
    logging.info('Use TPU at %s',
                 FLAGS.tpu if FLAGS.tpu is not None else 'local')
    resolver = tf.distribute.cluster_resolver.TPUClusterResolver(tpu=FLAGS.tpu)
    tf.config.experimental_connect_to_cluster(resolver)
    tf.tpu.experimental.initialize_tpu_system(resolver)
    strategy = tf.distribute.TPUStrategy(resolver)

  enable_mixup = (FLAGS.mixup_alpha > 0.0)
  mixup_params = {
      'mixup_alpha': FLAGS.mixup_alpha,
      'adaptive_mixup': False,
      'same_mix_weight_per_batch': FLAGS.same_mix_weight_per_batch,
      'use_random_shuffling': FLAGS.use_random_shuffling,
      'use_truncated_beta': FLAGS.use_truncated_beta
  }

  train_builder = utils.ImageNetInput(
      data_dir=FLAGS.data_dir,
      use_bfloat16=FLAGS.use_bfloat16,
      one_hot=True,
      mixup_params=mixup_params)
  test_builder = utils.ImageNetInput(
      data_dir=FLAGS.data_dir, use_bfloat16=FLAGS.use_bfloat16)
  train_dataset = train_builder.as_dataset(
      split=tfds.Split.TRAIN, batch_size=batch_size)
  test_dataset = test_builder.as_dataset(
      split=tfds.Split.TEST, batch_size=batch_size)
  train_dataset = strategy.experimental_distribute_dataset(train_dataset)
  test_dataset = strategy.experimental_distribute_dataset(test_dataset)

  if enable_mixup:
    mean_theta = mean_truncated_beta_distribution(FLAGS.mixup_alpha)

    # Train set to compute the means of the images and of the (one-hot)
    # labels.
    imagenet_train_no_mixup = utils.ImageNetInput(
        data_dir=FLAGS.data_dir, use_bfloat16=FLAGS.use_bfloat16, one_hot=True)
    imagenet_train_no_mixup = imagenet_train_no_mixup.as_dataset(
        split=tfds.Split.TRAIN, batch_size=batch_size)
    tr_data_no_mixup = strategy.experimental_distribute_dataset(
        imagenet_train_no_mixup)

  if FLAGS.use_bfloat16:
    policy = tf.keras.mixed_precision.experimental.Policy('mixed_bfloat16')
    tf.keras.mixed_precision.experimental.set_policy(policy)

  with strategy.scope():

    if enable_mixup:
      # Variables used to track the means of the images and (one-hot) labels.
      count = tf.Variable(tf.zeros((1,), dtype=tf.float32))
      mean_images = tf.Variable(tf.zeros(IMAGE_SHAPE, dtype=tf.float32))
      mean_labels = tf.Variable(tf.zeros((NUM_CLASSES,), dtype=tf.float32))

    logging.info('Building Keras ResNet-50 model')
    model = ub.models.resnet50_deterministic(
        input_shape=IMAGE_SHAPE, num_classes=NUM_CLASSES)
    logging.info('Model input shape: %s', model.input_shape)
    logging.info('Model output shape: %s', model.output_shape)
    logging.info('Model number of weights: %s', model.count_params())
    # Scale learning rate and decay epochs by vanilla settings.
    base_lr = FLAGS.base_learning_rate * batch_size / 256
    learning_rate = utils.LearningRateSchedule(steps_per_epoch,
                                               base_lr,
                                               FLAGS.train_epochs,
                                               _LR_SCHEDULE)
    optimizer = tf.keras.optimizers.SGD(learning_rate=learning_rate,
                                        momentum=0.9,
                                        nesterov=True)
    metrics = {
        'train/negative_log_likelihood': tf.keras.metrics.Mean(),
        'train/accuracy': tf.keras.metrics.SparseCategoricalAccuracy(),
        'train/loss': tf.keras.metrics.Mean(),
        'train/ece': um.ExpectedCalibrationError(num_bins=FLAGS.num_bins),
        'test/negative_log_likelihood': tf.keras.metrics.Mean(),
        'test/accuracy': tf.keras.metrics.SparseCategoricalAccuracy(),
        'test/ece': um.ExpectedCalibrationError(num_bins=FLAGS.num_bins),
    }
    logging.info('Finished building Keras ResNet-50 model')

    if enable_mixup:
      # With mixup enabled, we log the predictions with the rescaling from [2].
      metrics['test/negative_log_likelihood+rescaling'] = (
          tf.keras.metrics.Mean())
      metrics['test/accuracy+rescaling'] = (
          tf.keras.metrics.SparseCategoricalAccuracy())
      metrics['test/ece+rescaling'] = um.ExpectedCalibrationError(
          num_bins=FLAGS.num_bins)

    checkpoint = tf.train.Checkpoint(model=model, optimizer=optimizer)
    latest_checkpoint = tf.train.latest_checkpoint(FLAGS.output_dir)
    initial_epoch = 0
    if latest_checkpoint:
      # checkpoint.restore must be within a strategy.scope() so that optimizer
      # slot variables are mirrored.
      checkpoint.restore(latest_checkpoint)
      logging.info('Loaded checkpoint %s', latest_checkpoint)
      initial_epoch = optimizer.iterations.numpy() // steps_per_epoch

  summary_writer = tf.summary.create_file_writer(
      os.path.join(FLAGS.output_dir, 'summaries'))

  @tf.function
  def moving_average_step(iterator):
    """Training StepFn to compute the means of the images and labels."""

    def step_fn_labels(labels):
      return tf.reduce_mean(labels, axis=0)

    def step_fn_images(images):
      return tf.reduce_mean(tf.cast(images, tf.float32), axis=0)

    new_count = count + 1.
    count.assign(new_count)

    images, labels = next(iterator)
    per_replica_means = strategy.run(step_fn_labels, args=(labels,))
    cr_replica_means = strategy.reduce('mean', per_replica_means, axis=0)
    mean_labels.assign(cr_replica_means / count +
                       (count - 1.) / count * mean_labels)

    per_replica_means = strategy.run(step_fn_images, args=(images,))
    cr_replica_means = strategy.reduce('mean', per_replica_means, axis=0)
    mean_images.assign(cr_replica_means / count +
                       (count - 1.) / count * mean_images)

  @tf.function
  def train_step(iterator):
    """Training StepFn."""

    def step_fn(inputs):
      """Per-Replica StepFn."""
      images, labels = inputs
      with tf.GradientTape() as tape:
        logits = model(images, training=True)
        if FLAGS.use_bfloat16:
          logits = tf.cast(logits, tf.float32)
        negative_log_likelihood = tf.reduce_mean(
            tf.keras.losses.categorical_crossentropy(
                labels, logits, from_logits=True))
        filtered_variables = []
        for var in model.trainable_variables:
          # Apply l2 on the weights. This excludes BN parameters and biases,
          # but pay attention to their naming scheme.
          if 'kernel' in var.name or 'bias' in var.name:
            filtered_variables.append(tf.reshape(var, (-1,)))
        l2_loss = FLAGS.l2 * 2 * tf.nn.l2_loss(
            tf.concat(filtered_variables, axis=0))
        # Scale the loss, given that the TPUStrategy will reduce-sum all
        # gradients.
        loss = negative_log_likelihood + l2_loss
        scaled_loss = loss / strategy.num_replicas_in_sync

      grads = tape.gradient(scaled_loss, model.trainable_variables)
      optimizer.apply_gradients(zip(grads, model.trainable_variables))

      probs = tf.nn.softmax(logits)
      # We go back from one-hot labels to integers.
      labels = tf.argmax(labels, axis=-1)
      metrics['train/ece'].update_state(labels, probs)
      metrics['train/loss'].update_state(loss)
      metrics['train/negative_log_likelihood'].update_state(
          negative_log_likelihood)
      metrics['train/accuracy'].update_state(labels, logits)

    strategy.run(step_fn, args=(next(iterator),))

  @tf.function
  def update_test_metrics(labels, logits, metric_suffix=''):
    negative_log_likelihood = tf.reduce_mean(
        tf.keras.losses.sparse_categorical_crossentropy(
            labels, logits, from_logits=True))
    probs = tf.nn.softmax(logits)
    metrics['test/negative_log_likelihood' + metric_suffix].update_state(
        negative_log_likelihood)
    metrics['test/accuracy' + metric_suffix].update_state(labels, probs)
    metrics['test/ece' + metric_suffix].update_state(labels, probs)

  @tf.function
  def test_step(iterator):
    """Evaluation StepFn."""

    def step_fn(inputs):
      """Per-Replica StepFn."""
      images, labels = inputs

      logits = model(images, training=False)
      if FLAGS.use_bfloat16:
        logits = tf.cast(logits, tf.float32)
      update_test_metrics(labels, logits)

      # Rescaling logic in Eq.(15) from [2].
      if enable_mixup:
        images *= mean_theta
        images += (1. - mean_theta) * tf.cast(mean_images, images.dtype)

        scaled_logits = model(images, training=False)
        if FLAGS.use_bfloat16:
          scaled_logits = tf.cast(scaled_logits, tf.float32)

        scaled_logits *= 1. / mean_theta
        scaled_logits += (1. - 1. / mean_theta) * tf.cast(
            mean_labels, logits.dtype)

        update_test_metrics(labels, scaled_logits, '+rescaling')

    strategy.run(step_fn, args=(next(iterator),))

  metrics.update({'test/ms_per_example': tf.keras.metrics.Mean()})

  if enable_mixup:
    logging.info('Starting to compute the means of labels and images')
    tr_iterator_no_mixup = iter(tr_data_no_mixup)
    for step in range(steps_per_epoch):
      moving_average_step(tr_iterator_no_mixup)
    # Save the stats required by the mixup rescaling [2] for subsequent
    # predictions.
    mixup_rescaling_stats = {
        'mean_labels': mean_labels.numpy(),
        'mean_images': mean_images.numpy(),
        'mean_theta': mean_theta
    }
    output_dir = os.path.join(FLAGS.output_dir, 'mixup_rescaling_stats.npz')
    with tf.io.gfile.GFile(output_dir, 'wb') as f:
      np.save(f, list(mixup_rescaling_stats.items()))
    logging.info('Finished computing the means of labels and images')

  train_iterator = iter(train_dataset)
  start_time = time.time()
  for epoch in range(initial_epoch, FLAGS.train_epochs):
    logging.info('Starting to run epoch: %s', epoch)
    for step in range(steps_per_epoch):
      train_step(train_iterator)

      current_step = epoch * steps_per_epoch + (step + 1)
      max_steps = steps_per_epoch * FLAGS.train_epochs
      time_elapsed = time.time() - start_time
      steps_per_sec = float(current_step) / time_elapsed
      eta_seconds = (max_steps - current_step) / steps_per_sec
      message = ('{:.1%} completion: epoch {:d}/{:d}. {:.1f} steps/s. '
                 'ETA: {:.0f} min. Time elapsed: {:.0f} min'.format(
                     current_step / max_steps, epoch + 1, FLAGS.train_epochs,
                     steps_per_sec, eta_seconds / 60, time_elapsed / 60))
      if step % 20 == 0:
        logging.info(message)

    test_iterator = iter(test_dataset)
    for step in range(steps_per_eval):
      if step % 20 == 0:
        logging.info('Starting to run eval step %s of epoch: %s', step, epoch)
      test_start_time = time.time()
      test_step(test_iterator)
      ms_per_example = (time.time() - test_start_time) * 1e6 / batch_size
      metrics['test/ms_per_example'].update_state(ms_per_example)

    logging.info('Train Loss: %.4f, Accuracy: %.2f%%',
                 metrics['train/loss'].result(),
                 metrics['train/accuracy'].result() * 100)
    logging.info('Test NLL: %.4f, Accuracy: %.2f%%',
                 metrics['test/negative_log_likelihood'].result(),
                 metrics['test/accuracy'].result() * 100)
    if enable_mixup:
      logging.info(
          'Test NLL (+ rescaling): %.4f, Accuracy (+ rescaling): %.2f%%',
          metrics['test/negative_log_likelihood+rescaling'].result(),
          metrics['test/accuracy+rescaling'].result() * 100)

    total_results = {name: metric.result() for name, metric in metrics.items()}
    with summary_writer.as_default():
      for name, result in total_results.items():
        tf.summary.scalar(name, result, step=epoch + 1)

    for metric in metrics.values():
      metric.reset_states()

    if (FLAGS.checkpoint_interval > 0 and
        (epoch + 1) % FLAGS.checkpoint_interval == 0):
      checkpoint_name = checkpoint.save(
          os.path.join(FLAGS.output_dir, 'checkpoint'))
      logging.info('Saved checkpoint to %s', checkpoint_name)

  final_save_name = os.path.join(FLAGS.output_dir, 'model')
  model.save(final_save_name)
  logging.info('Saved model to %s', final_save_name)
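
# A minimal sketch (illustrative helper, not part of the script above) of the
# Eq.(15) rescaling applied in test_step, assuming mean_theta, mean_images and
# mean_labels were accumulated as in moving_average_step.
def _rescaled_predictions_sketch(model, images, mean_theta, mean_images,
                                 mean_labels):
  # Shrink test inputs toward the training mean image, mirroring the mixup
  # transformation the model saw during training.
  mixed_images = mean_theta * images + (1. - mean_theta) * mean_images
  logits = model(mixed_images, training=False)
  # Invert the mixing in logit space using the mean (one-hot) label.
  return logits / mean_theta + (1. - 1. / mean_theta) * mean_labels
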
def main(argv):
  del argv  # unused arg
  tf.enable_v2_behavior()
  tf.random.set_seed(FLAGS.seed)

  # In BatchEnsemble version 2, the input images are tiled not only at
  # inference time but also during training, so each ensemble member is
  # trained with the same batch size as a single model.
  if FLAGS.version2:
    logging.info('Training BatchEnsemble version 2')
    batch_size = ((FLAGS.per_core_batch_size // FLAGS.num_models) *
                  FLAGS.num_cores)
  else:
    batch_size = FLAGS.per_core_batch_size * FLAGS.num_cores
  steps_per_epoch = APPROX_IMAGENET_TRAIN_IMAGES // batch_size
  steps_per_eval = IMAGENET_VALIDATION_IMAGES // batch_size

  logging.info('Saving checkpoints at %s', FLAGS.output_dir)

  if FLAGS.use_gpu:
    logging.info('Use GPU')
    strategy = tf.distribute.MirroredStrategy()
  else:
    logging.info('Use TPU at %s',
                 FLAGS.tpu if FLAGS.tpu is not None else 'local')
    resolver = tf.distribute.cluster_resolver.TPUClusterResolver(tpu=FLAGS.tpu)
    tf.config.experimental_connect_to_cluster(resolver)
    tf.tpu.experimental.initialize_tpu_system(resolver)
    strategy = tf.distribute.experimental.TPUStrategy(resolver)

  imagenet_train = utils.ImageNetInput(
      is_training=True,
      data_dir=FLAGS.data_dir,
      batch_size=batch_size,
      use_bfloat16=not FLAGS.use_gpu,
      drop_remainder=True)
  imagenet_eval = utils.ImageNetInput(
      is_training=False,
      data_dir=FLAGS.data_dir,
      batch_size=batch_size,
      use_bfloat16=not FLAGS.use_gpu,
      drop_remainder=True)
  train_dataset = strategy.experimental_distribute_dataset(
      imagenet_train.input_fn())
  test_dataset = strategy.experimental_distribute_dataset(
      imagenet_eval.input_fn())

  if FLAGS.use_bfloat16:
    policy = tf.keras.mixed_precision.experimental.Policy('mixed_bfloat16')
    tf.keras.mixed_precision.experimental.set_policy(policy)

  summary_writer = tf.summary.create_file_writer(
      os.path.join(FLAGS.output_dir, 'summaries'))

  with strategy.scope():
    logging.info('Building Keras ResNet-50 model')
    model = batchensemble_model.ensemble_resnet50(
        input_shape=(224, 224, 3),
        num_classes=NUM_CLASSES,
        num_models=FLAGS.num_models,
        random_sign_init=FLAGS.random_sign_init,
        use_tpu=not FLAGS.use_gpu)
    logging.info('Model input shape: %s', model.input_shape)
    logging.info('Model output shape: %s', model.output_shape)
    logging.info('Model number of weights: %s', model.count_params())
    # Scale learning rate and decay epochs by vanilla settings.
    base_lr = FLAGS.base_learning_rate * batch_size / 256
    learning_rate = utils.LearningRateSchedule(steps_per_epoch,
                                               base_lr,
                                               FLAGS.train_epochs,
                                               _LR_SCHEDULE)
    optimizer = tf.keras.optimizers.SGD(learning_rate=learning_rate,
                                        momentum=0.9,
                                        nesterov=True)
    metrics = {
        'train/negative_log_likelihood': tf.keras.metrics.Mean(),
        'train/accuracy': tf.keras.metrics.SparseCategoricalAccuracy(),
        'train/loss': tf.keras.metrics.Mean(),
        'test/negative_log_likelihood': tf.keras.metrics.Mean(),
        'test/accuracy': tf.keras.metrics.SparseCategoricalAccuracy(),
    }
    for i in range(FLAGS.num_models):
      metrics['test/nll_member_{}'.format(i)] = tf.keras.metrics.Mean()
      metrics['test/accuracy_member_{}'.format(i)] = (
          tf.keras.metrics.SparseCategoricalAccuracy())
    logging.info('Finished building Keras ResNet-50 model')

    checkpoint = tf.train.Checkpoint(model=model, optimizer=optimizer)
    latest_checkpoint = tf.train.latest_checkpoint(FLAGS.output_dir)
    initial_epoch = 0
    if latest_checkpoint:
      # checkpoint.restore must be within a strategy.scope() so that optimizer
      # slot variables are mirrored.
      checkpoint.restore(latest_checkpoint)
      logging.info('Loaded checkpoint %s', latest_checkpoint)
      initial_epoch = optimizer.iterations.numpy() // steps_per_epoch

  @tf.function
  def train_step(iterator):
    """Training StepFn."""

    def step_fn(inputs):
      """Per-Replica StepFn."""
      images, labels = inputs
      if FLAGS.version2:
        images = tf.tile(images, [FLAGS.num_models, 1, 1, 1])
        labels = tf.tile(labels, [FLAGS.num_models, 1])

      with tf.GradientTape() as tape:
        logits = model(images, training=True)
        if FLAGS.use_bfloat16:
          logits = tf.cast(logits, tf.float32)
        negative_log_likelihood = tf.reduce_mean(
            tf.keras.losses.sparse_categorical_crossentropy(
                labels, logits, from_logits=True))
        filtered_variables = []
        for var in model.trainable_variables:
          # Apply l2 on the slow weights and bias terms. This excludes BN
          # parameters and fast weight approximate posterior/prior parameters,
          # but pay attention to their naming scheme.
          if 'kernel' in var.name or 'bias' in var.name:
            filtered_variables.append(tf.reshape(var, (-1,)))
        l2_loss = FLAGS.l2 * 2 * tf.nn.l2_loss(
            tf.concat(filtered_variables, axis=0))
        loss = negative_log_likelihood + l2_loss
        # Scale the loss, given that the TPUStrategy will reduce-sum all
        # gradients.
        scaled_loss = loss / strategy.num_replicas_in_sync

      grads = tape.gradient(scaled_loss, model.trainable_variables)

      # Separate learning rate implementation.
      if FLAGS.fast_weight_lr_multiplier != 1.0:
        grads_and_vars = []
        for grad, var in zip(grads, model.trainable_variables):
          # Apply a different learning rate on the fast weights. This excludes
          # BN and slow weights, but pay attention to the naming scheme.
          if 'batch_norm' not in var.name and 'kernel' not in var.name:
            grads_and_vars.append(
                (grad * FLAGS.fast_weight_lr_multiplier, var))
          else:
            grads_and_vars.append((grad, var))
        optimizer.apply_gradients(grads_and_vars)
      else:
        optimizer.apply_gradients(zip(grads, model.trainable_variables))

      metrics['train/loss'].update_state(loss)
      metrics['train/negative_log_likelihood'].update_state(
          negative_log_likelihood)
      metrics['train/accuracy'].update_state(labels, logits)

    strategy.experimental_run_v2(step_fn, args=(next(iterator),))

  @tf.function
  def test_step(iterator):
    """Evaluation StepFn."""

    def step_fn(inputs):
      """Per-Replica StepFn."""
      images, labels = inputs
      images = tf.tile(images, [FLAGS.num_models, 1, 1, 1])
      logits = model(images, training=False)
      if FLAGS.use_bfloat16:
        logits = tf.cast(logits, tf.float32)
      probs = tf.nn.softmax(logits)
      per_probs = tf.split(probs, num_or_size_splits=FLAGS.num_models, axis=0)
      for i in range(FLAGS.num_models):
        member_probs = per_probs[i]
        member_loss = tf.keras.losses.sparse_categorical_crossentropy(
            labels, member_probs)
        metrics['test/nll_member_{}'.format(i)].update_state(member_loss)
        metrics['test/accuracy_member_{}'.format(i)].update_state(
            labels, member_probs)

      probs = tf.reduce_mean(per_probs, axis=0)
      negative_log_likelihood = tf.reduce_mean(
          tf.keras.losses.sparse_categorical_crossentropy(labels, probs))
      metrics['test/negative_log_likelihood'].update_state(
          negative_log_likelihood)
      metrics['test/accuracy'].update_state(labels, probs)

    strategy.experimental_run_v2(step_fn, args=(next(iterator),))

  train_iterator = iter(train_dataset)
  start_time = time.time()
  for epoch in range(initial_epoch, FLAGS.train_epochs):
    logging.info('Starting to run epoch: %s', epoch)
    for step in range(steps_per_epoch):
      train_step(train_iterator)

      current_step = epoch * steps_per_epoch + (step + 1)
      max_steps = steps_per_epoch * FLAGS.train_epochs
      time_elapsed = time.time() - start_time
      steps_per_sec = float(current_step) / time_elapsed
      eta_seconds = (max_steps - current_step) / steps_per_sec
      message = ('{:.1%} completion: epoch {:d}/{:d}. {:.1f} steps/s. '
                 'ETA: {:.0f} min. Time elapsed: {:.0f} min'.format(
                     current_step / max_steps, epoch + 1, FLAGS.train_epochs,
                     steps_per_sec, eta_seconds / 60, time_elapsed / 60))
      if step % 20 == 0:
        logging.info(message)

    test_iterator = iter(test_dataset)
    for step in range(steps_per_eval):
      if step % 20 == 0:
        logging.info('Starting to run eval step %s of epoch: %s', step, epoch)
      test_step(test_iterator)

    logging.info('Train Loss: %.4f, Accuracy: %.2f%%',
                 metrics['train/loss'].result(),
                 metrics['train/accuracy'].result() * 100)
    logging.info('Test NLL: %.4f, Accuracy: %.2f%%',
                 metrics['test/negative_log_likelihood'].result(),
                 metrics['test/accuracy'].result() * 100)
    for i in range(FLAGS.num_models):
      logging.info(
          'Member %d Test Loss: %.4f, Accuracy: %.2f%%', i,
          metrics['test/nll_member_{}'.format(i)].result(),
          metrics['test/accuracy_member_{}'.format(i)].result() * 100)

    with summary_writer.as_default():
      for name, metric in metrics.items():
        tf.summary.scalar(name, metric.result(), step=epoch + 1)

    for metric in metrics.values():
      metric.reset_states()

    if (epoch + 1) % 20 == 0:
      checkpoint_name = checkpoint.save(
          os.path.join(FLAGS.output_dir, 'checkpoint'))
      logging.info('Saved checkpoint to %s', checkpoint_name)
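
# A minimal sketch (illustrative helper, not part of the script above) of how
# BatchEnsemble evaluation shares one forward pass across members: the batch
# is tiled `num_models` times along axis 0, and per-member predictions are
# recovered with tf.split before averaging.
def _ensemble_predict_sketch(model, images, num_models):
  tiled = tf.tile(images, [num_models, 1, 1, 1])  # [num_models * B, H, W, C]
  probs = tf.nn.softmax(model(tiled, training=False))
  per_member = tf.split(probs, num_or_size_splits=num_models, axis=0)
  return tf.reduce_mean(per_member, axis=0)  # [B, num_classes]
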
def main(argv):
  del argv  # unused arg
  tf.io.gfile.makedirs(FLAGS.output_dir)
  logging.info('Saving checkpoints at %s', FLAGS.output_dir)
  tf.random.set_seed(FLAGS.seed)

  per_core_batch_size = FLAGS.per_core_batch_size // FLAGS.ensemble_size
  batch_size = per_core_batch_size * FLAGS.num_cores
  steps_per_epoch = APPROX_IMAGENET_TRAIN_IMAGES // batch_size
  steps_per_eval = IMAGENET_VALIDATION_IMAGES // batch_size

  if FLAGS.use_gpu:
    logging.info('Use GPU')
    strategy = tf.distribute.MirroredStrategy()
  else:
    logging.info('Use TPU at %s',
                 FLAGS.tpu if FLAGS.tpu is not None else 'local')
    resolver = tf.distribute.cluster_resolver.TPUClusterResolver(tpu=FLAGS.tpu)
    tf.config.experimental_connect_to_cluster(resolver)
    tf.tpu.experimental.initialize_tpu_system(resolver)
    strategy = tf.distribute.TPUStrategy(resolver)

  mixup_params = {
      'ensemble_size': FLAGS.ensemble_size,
      'mixup_alpha': FLAGS.mixup_alpha,
      'adaptive_mixup': FLAGS.adaptive_mixup,
      'num_classes': NUM_CLASSES,
  }
  train_builder = utils.ImageNetInput(
      data_dir=FLAGS.data_dir,
      one_hot=(FLAGS.mixup_alpha > 0),
      use_bfloat16=FLAGS.use_bfloat16,
      mixup_params=mixup_params,
      ensemble_size=FLAGS.ensemble_size)
  test_builder = utils.ImageNetInput(
      data_dir=FLAGS.data_dir, use_bfloat16=FLAGS.use_bfloat16)
  train_dataset = train_builder.as_dataset(
      split=tfds.Split.TRAIN, batch_size=batch_size)
  clean_test_dataset = test_builder.as_dataset(
      split=tfds.Split.TEST, batch_size=batch_size)
  train_dataset = strategy.experimental_distribute_dataset(train_dataset)
  test_datasets = {
      'clean': strategy.experimental_distribute_dataset(clean_test_dataset)
  }
  if FLAGS.adaptive_mixup:
    imagenet_confidence_dataset = test_builder.as_dataset(
        split=tfds.Split.VALIDATION,
        batch_size=FLAGS.per_core_batch_size * FLAGS.num_cores)
    imagenet_confidence_dataset = (
        strategy.experimental_distribute_dataset(imagenet_confidence_dataset))
  if FLAGS.corruptions_interval > 0:
    corruption_types, max_intensity = utils.load_corrupted_test_info()
    for name in corruption_types:
      for intensity in range(1, max_intensity + 1):
        dataset_name = '{0}_{1}'.format(name, intensity)
        dataset = utils.load_corrupted_test_dataset(
            batch_size=batch_size,
            corruption_name=name,
            corruption_intensity=intensity,
            use_bfloat16=FLAGS.use_bfloat16)
        test_datasets[dataset_name] = (
            strategy.experimental_distribute_dataset(dataset))

  if FLAGS.use_bfloat16:
    policy = tf.keras.mixed_precision.experimental.Policy('mixed_bfloat16')
    tf.keras.mixed_precision.experimental.set_policy(policy)

  summary_writer = tf.summary.create_file_writer(
      os.path.join(FLAGS.output_dir, 'summaries'))

  with strategy.scope():
    logging.info('Building Keras ResNet-50 model')
    model = ub.models.resnet_batchensemble(
        input_shape=(224, 224, 3),
        num_classes=NUM_CLASSES,
        ensemble_size=FLAGS.ensemble_size,
        random_sign_init=FLAGS.random_sign_init,
        use_ensemble_bn=FLAGS.use_ensemble_bn,
        depth=FLAGS.depth)
    logging.info('Model input shape: %s', model.input_shape)
    logging.info('Model output shape: %s', model.output_shape)
    logging.info('Model number of weights: %s', model.count_params())
    # Scale learning rate and decay epochs by vanilla settings.
    base_lr = FLAGS.base_learning_rate * batch_size / 256
    learning_rate = utils.LearningRateSchedule(steps_per_epoch,
                                               base_lr,
                                               FLAGS.train_epochs,
                                               _LR_SCHEDULE)
    optimizer = tf.keras.optimizers.SGD(learning_rate=learning_rate,
                                        momentum=0.9,
                                        nesterov=True)
    metrics = {
        'train/negative_log_likelihood': tf.keras.metrics.Mean(),
        'train/accuracy': tf.keras.metrics.SparseCategoricalAccuracy(),
        'train/loss': tf.keras.metrics.Mean(),
        'train/ece': um.ExpectedCalibrationError(num_bins=FLAGS.num_bins),
        'test/negative_log_likelihood': tf.keras.metrics.Mean(),
        'test/accuracy': tf.keras.metrics.SparseCategoricalAccuracy(),
        'test/ece': um.ExpectedCalibrationError(num_bins=FLAGS.num_bins),
        'test/member_accuracy_mean': (
            tf.keras.metrics.SparseCategoricalAccuracy()),
        'test/member_ece_mean': um.ExpectedCalibrationError(
            num_bins=FLAGS.num_bins)
    }
    if FLAGS.corruptions_interval > 0:
      corrupt_metrics = {}
      for intensity in range(1, max_intensity + 1):
        for corruption in corruption_types:
          dataset_name = '{0}_{1}'.format(corruption, intensity)
          corrupt_metrics['test/nll_{}'.format(dataset_name)] = (
              tf.keras.metrics.Mean())
          corrupt_metrics['test/accuracy_{}'.format(dataset_name)] = (
              tf.keras.metrics.SparseCategoricalAccuracy())
          corrupt_metrics['test/ece_{}'.format(dataset_name)] = (
              um.ExpectedCalibrationError(num_bins=FLAGS.num_bins))
          corrupt_metrics['test/member_acc_mean_{}'.format(dataset_name)] = (
              tf.keras.metrics.SparseCategoricalAccuracy())
          corrupt_metrics['test/member_ece_mean_{}'.format(dataset_name)] = (
              um.ExpectedCalibrationError(num_bins=FLAGS.num_bins))

    test_diversity = {}
    training_diversity = {}
    for i in range(FLAGS.ensemble_size):
      metrics['test/nll_member_{}'.format(i)] = tf.keras.metrics.Mean()
      metrics['test/accuracy_member_{}'.format(i)] = (
          tf.keras.metrics.SparseCategoricalAccuracy())
    test_diversity = {
        'test/disagreement': tf.keras.metrics.Mean(),
        'test/average_kl': tf.keras.metrics.Mean(),
        'test/cosine_similarity': tf.keras.metrics.Mean(),
    }
    training_diversity = {
        'train/disagreement': tf.keras.metrics.Mean(),
        'train/average_kl': tf.keras.metrics.Mean(),
        'train/cosine_similarity': tf.keras.metrics.Mean(),
    }
    logging.info('Finished building Keras ResNet-50 model')

    checkpoint = tf.train.Checkpoint(model=model, optimizer=optimizer)
    latest_checkpoint = tf.train.latest_checkpoint(FLAGS.output_dir)
    initial_epoch = 0
    if latest_checkpoint:
      # checkpoint.restore must be within a strategy.scope() so that optimizer
      # slot variables are mirrored.
      checkpoint.restore(latest_checkpoint)
      logging.info('Loaded checkpoint %s', latest_checkpoint)
      initial_epoch = optimizer.iterations.numpy() // steps_per_epoch

  @tf.function
  def train_step(iterator):
    """Training StepFn."""

    def step_fn(inputs):
      """Per-Replica StepFn."""
      images, labels = inputs
      if FLAGS.adaptive_mixup:
        images = tf.identity(images)
      else:
        images = tf.tile(images, [FLAGS.ensemble_size, 1, 1, 1])
      if FLAGS.adaptive_mixup:
        labels = tf.identity(labels)
      elif FLAGS.mixup_alpha > 0:
        labels = tf.tile(labels, [FLAGS.ensemble_size, 1])
      else:
        labels = tf.tile(labels, [FLAGS.ensemble_size])

      with tf.GradientTape() as tape:
        logits = model(images, training=True)
        if FLAGS.use_bfloat16:
          logits = tf.cast(logits, tf.float32)
        probs = tf.nn.softmax(logits)
        per_probs = tf.reshape(
            probs, tf.concat([[FLAGS.ensemble_size, -1], probs.shape[1:]], 0))
        diversity_results = um.average_pairwise_diversity(
            per_probs, FLAGS.ensemble_size)
        if FLAGS.mixup_alpha > 0:
          negative_log_likelihood = tf.reduce_mean(
              tf.keras.losses.categorical_crossentropy(
                  labels, logits, from_logits=True))
        else:
          negative_log_likelihood = tf.reduce_mean(
              tf.keras.losses.sparse_categorical_crossentropy(
                  labels, logits, from_logits=True))
        filtered_variables = []
        for var in model.trainable_variables:
          # Apply l2 on the slow weights and bias terms. This excludes BN
          # parameters and fast weight approximate posterior/prior parameters,
          # but pay attention to their naming scheme.
          if 'kernel' in var.name or 'bias' in var.name:
            filtered_variables.append(tf.reshape(var, (-1,)))
        l2_loss = FLAGS.l2 * 2 * tf.nn.l2_loss(
            tf.concat(filtered_variables, axis=0))
        loss = negative_log_likelihood + l2_loss
        # Scale the loss, given that the TPUStrategy will reduce-sum all
        # gradients.
        scaled_loss = loss / strategy.num_replicas_in_sync

      grads = tape.gradient(scaled_loss, model.trainable_variables)

      # Separate learning rate implementation.
      if FLAGS.fast_weight_lr_multiplier != 1.0:
        grads_and_vars = []
        for grad, var in zip(grads, model.trainable_variables):
          # Apply a different learning rate on the fast weights. This excludes
          # BN and slow weights, but pay attention to the naming scheme.
          if ('batch_norm' not in var.name and 'kernel' not in var.name):
            grads_and_vars.append(
                (grad * FLAGS.fast_weight_lr_multiplier, var))
          else:
            grads_and_vars.append((grad, var))
        optimizer.apply_gradients(grads_and_vars)
      else:
        optimizer.apply_gradients(zip(grads, model.trainable_variables))

      if FLAGS.mixup_alpha > 0:
        labels = tf.argmax(labels, axis=-1)
      metrics['train/ece'].update_state(labels, probs)
      metrics['train/loss'].update_state(loss)
      metrics['train/negative_log_likelihood'].update_state(
          negative_log_likelihood)
      metrics['train/accuracy'].update_state(labels, logits)
      for k, v in diversity_results.items():
        training_diversity['train/' + k].update_state(v)

    strategy.run(step_fn, args=(next(iterator),))

  @tf.function
  def test_step(iterator, dataset_name):
    """Evaluation StepFn."""

    def step_fn(inputs):
      """Per-Replica StepFn."""
      images, labels = inputs
      images = tf.tile(images, [FLAGS.ensemble_size, 1, 1, 1])
      logits = model(images, training=False)
      if FLAGS.use_bfloat16:
        logits = tf.cast(logits, tf.float32)
      probs = tf.nn.softmax(logits)

      if dataset_name == 'clean':
        per_probs_tensor = tf.reshape(
            probs, tf.concat([[FLAGS.ensemble_size, -1], probs.shape[1:]], 0))
        diversity_results = um.average_pairwise_diversity(
            per_probs_tensor, FLAGS.ensemble_size)
        for k, v in diversity_results.items():
          test_diversity['test/' + k].update_state(v)

      per_probs = tf.split(
          probs, num_or_size_splits=FLAGS.ensemble_size, axis=0)
      probs = tf.reduce_mean(per_probs, axis=0)
      negative_log_likelihood = tf.reduce_mean(
          tf.keras.losses.sparse_categorical_crossentropy(labels, probs))
      for i in range(FLAGS.ensemble_size):
        member_probs = per_probs[i]
        if dataset_name == 'clean':
          member_loss = tf.keras.losses.sparse_categorical_crossentropy(
              labels, member_probs)
          metrics['test/nll_member_{}'.format(i)].update_state(member_loss)
          metrics['test/accuracy_member_{}'.format(i)].update_state(
              labels, member_probs)
          metrics['test/member_accuracy_mean'].update_state(
              labels, member_probs)
          metrics['test/member_ece_mean'].update_state(labels, member_probs)
        elif dataset_name != 'confidence_validation':
          corrupt_metrics['test/member_acc_mean_{}'.format(
              dataset_name)].update_state(labels, member_probs)
          corrupt_metrics['test/member_ece_mean_{}'.format(
              dataset_name)].update_state(labels, member_probs)

      if dataset_name == 'clean':
        metrics['test/negative_log_likelihood'].update_state(
            negative_log_likelihood)
        metrics['test/accuracy'].update_state(labels, probs)
        metrics['test/ece'].update_state(labels, probs)
      elif dataset_name != 'confidence_validation':
        corrupt_metrics['test/nll_{}'.format(dataset_name)].update_state(
            negative_log_likelihood)
        corrupt_metrics['test/accuracy_{}'.format(dataset_name)].update_state(
            labels, probs)
        corrupt_metrics['test/ece_{}'.format(dataset_name)].update_state(
            labels, probs)

      if dataset_name == 'confidence_validation':
        return tf.stack(per_probs, 0), labels

    if dataset_name == 'confidence_validation':
      return strategy.run(step_fn, args=(next(iterator),))
    else:
      strategy.run(step_fn, args=(next(iterator),))

  metrics.update({'test/ms_per_example': tf.keras.metrics.Mean()})

  train_iterator = iter(train_dataset)
  start_time = time.time()
  for epoch in range(initial_epoch, FLAGS.train_epochs):
    logging.info('Starting to run epoch: %s', epoch)
    for step in range(steps_per_epoch):
      train_step(train_iterator)

      current_step = epoch * steps_per_epoch + (step + 1)
      max_steps = steps_per_epoch * FLAGS.train_epochs
      time_elapsed = time.time() - start_time
      steps_per_sec = float(current_step) / time_elapsed
      eta_seconds = (max_steps - current_step) / steps_per_sec
      message = ('{:.1%} completion: epoch {:d}/{:d}. {:.1f} steps/s. '
                 'ETA: {:.0f} min. Time elapsed: {:.0f} min'.format(
                     current_step / max_steps, epoch + 1, FLAGS.train_epochs,
                     steps_per_sec, eta_seconds / 60, time_elapsed / 60))
      if step % 20 == 0:
        logging.info(message)

    if FLAGS.adaptive_mixup:
      confidence_set_iterator = iter(imagenet_confidence_dataset)
      predictions_list = []
      labels_list = []
      for step in range(FLAGS.confidence_eval_iterations):
        temp_predictions, temp_labels = test_step(confidence_set_iterator,
                                                  'confidence_validation')
        predictions_list.append(temp_predictions)
        labels_list.append(temp_labels)
      predictions = [
          tf.concat(list(predictions_list[i].values), axis=1)
          for i in range(len(predictions_list))
      ]
      labels = [
          tf.concat(list(labels_list[i].values), axis=0)
          for i in range(len(labels_list))
      ]
      predictions = tf.concat(predictions, axis=1)
      labels = tf.cast(tf.concat(labels, axis=0), tf.int64)

      def compute_acc_conf(preds, label, focus_class):
        class_preds = tf.boolean_mask(preds, label == focus_class, axis=1)
        class_pred_labels = tf.argmax(class_preds, axis=-1)
        confidence = tf.reduce_mean(tf.reduce_max(class_preds, axis=-1), -1)
        accuracy = tf.reduce_mean(
            tf.cast(class_pred_labels == focus_class, tf.float32), axis=-1)
        return accuracy - confidence

      calibration_per_class = [
          compute_acc_conf(predictions, labels, i) for i in range(NUM_CLASSES)
      ]
      calibration_per_class = tf.stack(calibration_per_class, axis=1)
      logging.info('calibration per class')
      logging.info(calibration_per_class)
      mixup_coeff = tf.where(calibration_per_class > 0, 1.0,
                             FLAGS.mixup_alpha)
      mixup_coeff = tf.clip_by_value(mixup_coeff, 0, 1)
      logging.info('mixup coeff')
      logging.info(mixup_coeff)
      mixup_params['mixup_coeff'] = mixup_coeff
      builder = utils.ImageNetInput(
          data_dir=FLAGS.data_dir,
          one_hot=(FLAGS.mixup_alpha > 0),
          use_bfloat16=FLAGS.use_bfloat16,
          mixup_params=mixup_params)
      train_dataset = builder.as_dataset(
          split=tfds.Split.TRAIN, batch_size=batch_size)
      train_dataset = strategy.experimental_distribute_dataset(train_dataset)
      train_iterator = iter(train_dataset)

    datasets_to_evaluate = {'clean': test_datasets['clean']}
    if (FLAGS.corruptions_interval > 0 and
        (epoch + 1) % FLAGS.corruptions_interval == 0):
      datasets_to_evaluate = test_datasets
    for dataset_name, test_dataset in datasets_to_evaluate.items():
      test_iterator = iter(test_dataset)
      logging.info('Testing on dataset %s', dataset_name)
      for step in range(steps_per_eval):
        if step % 20 == 0:
          logging.info('Starting to run eval step %s of epoch: %s', step,
                       epoch)
        test_start_time = time.time()
        test_step(test_iterator, dataset_name)
        ms_per_example = (time.time() - test_start_time) * 1e6 / batch_size
        metrics['test/ms_per_example'].update_state(ms_per_example)
      logging.info('Done with testing on %s', dataset_name)

    corrupt_results = {}
    if (FLAGS.corruptions_interval > 0 and
        (epoch + 1) % FLAGS.corruptions_interval == 0):
      corrupt_results = utils.aggregate_corrupt_metrics(
          corrupt_metrics, corruption_types, max_intensity,
          FLAGS.alexnet_errors_path)
    logging.info('Train Loss: %.4f, Accuracy: %.2f%%',
                 metrics['train/loss'].result(),
                 metrics['train/accuracy'].result() * 100)
    logging.info('Test NLL: %.4f, Accuracy: %.2f%%',
                 metrics['test/negative_log_likelihood'].result(),
                 metrics['test/accuracy'].result() * 100)
    for i in range(FLAGS.ensemble_size):
      logging.info(
          'Member %d Test Loss: %.4f, Accuracy: %.2f%%', i,
          metrics['test/nll_member_{}'.format(i)].result(),
          metrics['test/accuracy_member_{}'.format(i)].result() * 100)
    total_metrics = metrics.copy()
    total_metrics.update(training_diversity)
    total_metrics.update(test_diversity)
    total_results = {
        name: metric.result() for name, metric in total_metrics.items()
    }
    total_results.update(corrupt_results)
    with summary_writer.as_default():
      for name, result in total_results.items():
        tf.summary.scalar(name, result, step=epoch + 1)

    for _, metric in total_metrics.items():
      metric.reset_states()

    if (FLAGS.checkpoint_interval > 0 and
        (epoch + 1) % FLAGS.checkpoint_interval == 0):
      checkpoint_name = checkpoint.save(
          os.path.join(FLAGS.output_dir, 'checkpoint'))
      logging.info('Saved checkpoint to %s', checkpoint_name)

  final_save_name = os.path.join(FLAGS.output_dir, 'model')
  model.save(final_save_name)
  logging.info('Saved model to %s', final_save_name)
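
# A minimal sketch (illustrative reimplementation, not the library code) of
# the ensemble-diversity quantities tracked above by
# um.average_pairwise_diversity: pairwise disagreement is the fraction of
# examples on which two members' argmax predictions differ, averaged over all
# member pairs.
def _average_pairwise_disagreement_sketch(per_probs):
  # per_probs: [ensemble_size, batch_size, num_classes]
  preds = tf.argmax(per_probs, axis=-1)  # [ensemble_size, batch_size]
  ensemble_size = preds.shape[0]
  pair_disagreements = []
  for i in range(ensemble_size):
    for j in range(i + 1, ensemble_size):
      pair_disagreements.append(
          tf.reduce_mean(tf.cast(preds[i] != preds[j], tf.float32)))
  return tf.reduce_mean(tf.stack(pair_disagreements))
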
def main(argv):
  del argv  # unused arg
  tf.io.gfile.makedirs(FLAGS.output_dir)
  logging.info('Saving checkpoints at %s', FLAGS.output_dir)
  tf.random.set_seed(FLAGS.seed)

  if FLAGS.use_gpu:
    logging.info('Use GPU')
    strategy = tf.distribute.MirroredStrategy()
  else:
    logging.info('Use TPU at %s',
                 FLAGS.tpu if FLAGS.tpu is not None else 'local')
    resolver = tf.distribute.cluster_resolver.TPUClusterResolver(tpu=FLAGS.tpu)
    tf.config.experimental_connect_to_cluster(resolver)
    tf.tpu.experimental.initialize_tpu_system(resolver)
    strategy = tf.distribute.experimental.TPUStrategy(resolver)

  train_input_fn = utils.load_input_fn(
      split=tfds.Split.TRAIN,
      name=FLAGS.dataset,
      batch_size=FLAGS.per_core_batch_size // FLAGS.batch_repetitions,
      use_bfloat16=FLAGS.use_bfloat16)
  clean_test_input_fn = utils.load_input_fn(
      split=tfds.Split.TEST,
      name=FLAGS.dataset,
      batch_size=FLAGS.per_core_batch_size,
      use_bfloat16=FLAGS.use_bfloat16)
  train_dataset = strategy.experimental_distribute_datasets_from_function(
      train_input_fn)
  test_datasets = {
      'clean': strategy.experimental_distribute_datasets_from_function(
          clean_test_input_fn),
  }
  if FLAGS.corruptions_interval > 0:
    if FLAGS.dataset == 'cifar10':
      load_c_input_fn = utils.load_cifar10_c_input_fn
    else:
      load_c_input_fn = functools.partial(utils.load_cifar100_c_input_fn,
                                          path=FLAGS.cifar100_c_path)
    corruption_types, max_intensity = utils.load_corrupted_test_info(
        FLAGS.dataset)
    for corruption in corruption_types:
      for intensity in range(1, max_intensity + 1):
        input_fn = load_c_input_fn(
            corruption_name=corruption,
            corruption_intensity=intensity,
            batch_size=FLAGS.per_core_batch_size,
            use_bfloat16=FLAGS.use_bfloat16)
        test_datasets['{0}_{1}'.format(corruption, intensity)] = (
            strategy.experimental_distribute_datasets_from_function(input_fn))

  ds_info = tfds.builder(FLAGS.dataset).info
  train_batch_size = (FLAGS.per_core_batch_size * FLAGS.num_cores //
                      FLAGS.batch_repetitions)
  test_batch_size = FLAGS.per_core_batch_size * FLAGS.num_cores
  train_dataset_size = ds_info.splits['train'].num_examples
  steps_per_epoch = train_dataset_size // train_batch_size
  steps_per_eval = ds_info.splits['test'].num_examples // test_batch_size
  num_classes = ds_info.features['label'].num_classes

  if FLAGS.use_bfloat16:
    policy = tf.keras.mixed_precision.experimental.Policy('mixed_bfloat16')
    tf.keras.mixed_precision.experimental.set_policy(policy)

  summary_writer = tf.summary.create_file_writer(
      os.path.join(FLAGS.output_dir, 'summaries'))

  with strategy.scope():
    logging.info('Building Keras model')
    model = ub.models.wide_resnet_mimo(
        input_shape=[FLAGS.ensemble_size] +
        list(ds_info.features['image'].shape),
        depth=28,
        width_multiplier=FLAGS.width_multiplier,
        num_classes=num_classes,
        ensemble_size=FLAGS.ensemble_size)
    logging.info('Model input shape: %s', model.input_shape)
    logging.info('Model output shape: %s', model.output_shape)
    logging.info('Model number of weights: %s', model.count_params())
    # Linearly scale learning rate and the decay epochs by vanilla settings.
    base_lr = FLAGS.base_learning_rate * train_batch_size / 128
    lr_decay_epochs = [(int(start_epoch_str) * FLAGS.train_epochs) // 200
                       for start_epoch_str in FLAGS.lr_decay_epochs]
    lr_schedule = utils.LearningRateSchedule(steps_per_epoch,
                                             base_lr,
                                             FLAGS.lr_decay_ratio,
                                             lr_decay_epochs,
                                             FLAGS.lr_warmup_epochs)
    optimizer = tf.keras.optimizers.SGD(lr_schedule,
                                        momentum=0.9,
                                        nesterov=True)
    metrics = {
        'train/negative_log_likelihood': tf.keras.metrics.Mean(),
        'train/accuracy': tf.keras.metrics.SparseCategoricalAccuracy(),
        'train/loss': tf.keras.metrics.Mean(),
        'train/ece': um.ExpectedCalibrationError(num_bins=FLAGS.num_bins),
        'test/negative_log_likelihood': tf.keras.metrics.Mean(),
        'test/accuracy': tf.keras.metrics.SparseCategoricalAccuracy(),
        'test/ece': um.ExpectedCalibrationError(num_bins=FLAGS.num_bins),
    }
    if FLAGS.corruptions_interval > 0:
      corrupt_metrics = {}
      for intensity in range(1, max_intensity + 1):
        for corruption in corruption_types:
          dataset_name = '{0}_{1}'.format(corruption, intensity)
          corrupt_metrics['test/nll_{}'.format(dataset_name)] = (
              tf.keras.metrics.Mean())
          corrupt_metrics['test/accuracy_{}'.format(dataset_name)] = (
              tf.keras.metrics.SparseCategoricalAccuracy())
          corrupt_metrics['test/ece_{}'.format(dataset_name)] = (
              um.ExpectedCalibrationError(num_bins=FLAGS.num_bins))
    for i in range(FLAGS.ensemble_size):
      metrics['test/nll_member_{}'.format(i)] = tf.keras.metrics.Mean()
      metrics['test/accuracy_member_{}'.format(i)] = (
          tf.keras.metrics.SparseCategoricalAccuracy())
    test_diversity = {
        'test/disagreement': tf.keras.metrics.Mean(),
        'test/average_kl': tf.keras.metrics.Mean(),
        'test/cosine_similarity': tf.keras.metrics.Mean(),
    }

    checkpoint = tf.train.Checkpoint(model=model, optimizer=optimizer)
    latest_checkpoint = tf.train.latest_checkpoint(FLAGS.output_dir)
    initial_epoch = 0
    if latest_checkpoint:
      # checkpoint.restore must be within a strategy.scope() so that optimizer
      # slot variables are mirrored.
      checkpoint.restore(latest_checkpoint)
      logging.info('Loaded checkpoint %s', latest_checkpoint)
      initial_epoch = optimizer.iterations.numpy() // steps_per_epoch

  @tf.function
  def train_step(iterator):
    """Training StepFn."""

    def step_fn(inputs):
      """Per-Replica StepFn."""
      images, labels = inputs
      batch_size = tf.shape(images)[0]
      main_shuffle = tf.random.shuffle(
          tf.tile(tf.range(batch_size), [FLAGS.batch_repetitions]))
      to_shuffle = tf.cast(
          tf.cast(tf.shape(main_shuffle)[0], tf.float32) *
          (1. - FLAGS.input_repetition_probability), tf.int32)
      shuffle_indices = [
          tf.concat([
              tf.random.shuffle(main_shuffle[:to_shuffle]),
              main_shuffle[to_shuffle:]
          ], axis=0) for _ in range(FLAGS.ensemble_size)
      ]
      images = tf.stack(
          [tf.gather(images, indices, axis=0) for indices in shuffle_indices],
          axis=1)
      labels = tf.stack(
          [tf.gather(labels, indices, axis=0) for indices in shuffle_indices],
          axis=1)

      with tf.GradientTape() as tape:
        logits = model(images, training=True)
        if FLAGS.use_bfloat16:
          logits = tf.cast(logits, tf.float32)
        negative_log_likelihood = tf.reduce_mean(
            tf.reduce_sum(
                tf.keras.losses.sparse_categorical_crossentropy(
                    labels, logits, from_logits=True),
                axis=1))
        filtered_variables = []
        for var in model.trainable_variables:
          # Apply l2 on the weights, BN parameters, and bias terms.
          if ('kernel' in var.name or 'batch_norm' in var.name or
              'bias' in var.name):
            filtered_variables.append(tf.reshape(var, (-1,)))
        l2_loss = FLAGS.l2 * 2 * tf.nn.l2_loss(
            tf.concat(filtered_variables, axis=0))
        # Scale the loss, given that the TPUStrategy will reduce-sum all
        # gradients.
        loss = negative_log_likelihood + l2_loss
        scaled_loss = loss / strategy.num_replicas_in_sync

      grads = tape.gradient(scaled_loss, model.trainable_variables)
      optimizer.apply_gradients(zip(grads, model.trainable_variables))

      probs = tf.nn.softmax(tf.reshape(logits, [-1, num_classes]))
      flat_labels = tf.reshape(labels, [-1])
      metrics['train/ece'].update_state(flat_labels, probs)
      metrics['train/loss'].update_state(loss)
      metrics['train/negative_log_likelihood'].update_state(
          negative_log_likelihood)
      metrics['train/accuracy'].update_state(flat_labels, probs)

    strategy.run(step_fn, args=(next(iterator),))

  @tf.function
  def test_step(iterator, dataset_name):
    """Evaluation StepFn."""

    def step_fn(inputs):
      """Per-Replica StepFn."""
      images, labels = inputs
      images = tf.tile(
          tf.expand_dims(images, 1), [1, FLAGS.ensemble_size, 1, 1, 1])
      logits = model(images, training=False)
      if FLAGS.use_bfloat16:
        logits = tf.cast(logits, tf.float32)
      probs = tf.nn.softmax(logits)

      if dataset_name == 'clean':
        per_probs = tf.transpose(probs, perm=[1, 0, 2])
        diversity_results = um.average_pairwise_diversity(
            per_probs, FLAGS.ensemble_size)
        for k, v in diversity_results.items():
          test_diversity['test/' + k].update_state(v)

      for i in range(FLAGS.ensemble_size):
        member_probs = probs[:, i]
        member_loss = tf.keras.losses.sparse_categorical_crossentropy(
            labels, member_probs)
        metrics['test/nll_member_{}'.format(i)].update_state(member_loss)
        metrics['test/accuracy_member_{}'.format(i)].update_state(
            labels, member_probs)

      # Negative log marginal likelihood computed in a numerically-stable way.
      labels_tiled = tf.tile(
          tf.expand_dims(labels, 1), [1, FLAGS.ensemble_size])
      log_likelihoods = -tf.keras.losses.sparse_categorical_crossentropy(
          labels_tiled, logits, from_logits=True)
      negative_log_likelihood = tf.reduce_mean(
          -tf.reduce_logsumexp(log_likelihoods, axis=[1]) +
          tf.math.log(float(FLAGS.ensemble_size)))
      probs = tf.math.reduce_mean(probs, axis=1)  # marginalize

      if dataset_name == 'clean':
        metrics['test/negative_log_likelihood'].update_state(
            negative_log_likelihood)
        metrics['test/accuracy'].update_state(labels, probs)
        metrics['test/ece'].update_state(labels, probs)
      else:
        corrupt_metrics['test/nll_{}'.format(dataset_name)].update_state(
            negative_log_likelihood)
        corrupt_metrics['test/accuracy_{}'.format(dataset_name)].update_state(
            labels, probs)
        corrupt_metrics['test/ece_{}'.format(dataset_name)].update_state(
            labels, probs)

    strategy.run(step_fn, args=(next(iterator),))

  metrics.update({'test/ms_per_example': tf.keras.metrics.Mean()})

  train_iterator = iter(train_dataset)
  start_time = time.time()
  for epoch in range(initial_epoch, FLAGS.train_epochs):
    logging.info('Starting to run epoch: %s', epoch)
    for step in range(steps_per_epoch):
      train_step(train_iterator)

      current_step = epoch * steps_per_epoch + (step + 1)
      max_steps = steps_per_epoch * FLAGS.train_epochs
      time_elapsed = time.time() - start_time
      steps_per_sec = float(current_step) / time_elapsed
      eta_seconds = (max_steps - current_step) / steps_per_sec
      message = ('{:.1%} completion: epoch {:d}/{:d}. {:.1f} steps/s. '
                 'ETA: {:.0f} min. Time elapsed: {:.0f} min'.format(
                     current_step / max_steps, epoch + 1, FLAGS.train_epochs,
                     steps_per_sec, eta_seconds / 60, time_elapsed / 60))
      if step % 20 == 0:
        logging.info(message)

    datasets_to_evaluate = {'clean': test_datasets['clean']}
    if (FLAGS.corruptions_interval > 0 and
        (epoch + 1) % FLAGS.corruptions_interval == 0):
      datasets_to_evaluate = test_datasets
    for dataset_name, test_dataset in datasets_to_evaluate.items():
      test_iterator = iter(test_dataset)
      logging.info('Testing on dataset %s', dataset_name)
      for step in range(steps_per_eval):
        if step % 20 == 0:
          logging.info('Starting to run eval step %s of epoch: %s', step,
                       epoch)
        test_start_time = time.time()
        test_step(test_iterator, dataset_name)
        ms_per_example = (time.time() -
                          test_start_time) * 1e6 / test_batch_size
        metrics['test/ms_per_example'].update_state(ms_per_example)
      logging.info('Done with testing on %s', dataset_name)

    corrupt_results = {}
    if (FLAGS.corruptions_interval > 0 and
        (epoch + 1) % FLAGS.corruptions_interval == 0):
      corrupt_results = utils.aggregate_corrupt_metrics(
          corrupt_metrics, corruption_types, max_intensity)

    logging.info('Train Loss: %.4f, Accuracy: %.2f%%',
                 metrics['train/loss'].result(),
                 metrics['train/accuracy'].result() * 100)
    logging.info('Test NLL: %.4f, Accuracy: %.2f%%',
                 metrics['test/negative_log_likelihood'].result(),
                 metrics['test/accuracy'].result() * 100)
    for i in range(FLAGS.ensemble_size):
      logging.info(
          'Member %d Test Loss: %.4f, Accuracy: %.2f%%', i,
          metrics['test/nll_member_{}'.format(i)].result(),
          metrics['test/accuracy_member_{}'.format(i)].result() * 100)

    metrics.update(test_diversity)
    total_results = {name: metric.result() for name, metric in metrics.items()}
    total_results.update(corrupt_results)
    with summary_writer.as_default():
      for name, result in total_results.items():
        tf.summary.scalar(name, result, step=epoch + 1)

    for metric in metrics.values():
      metric.reset_states()

    if (FLAGS.checkpoint_interval > 0 and
        (epoch + 1) % FLAGS.checkpoint_interval == 0):
      checkpoint_name = checkpoint.save(
          os.path.join(FLAGS.output_dir, 'checkpoint'))
      logging.info('Saved checkpoint to %s', checkpoint_name)

  final_checkpoint_name = checkpoint.save(
      os.path.join(FLAGS.output_dir, 'checkpoint'))
  logging.info('Saved last checkpoint to %s', final_checkpoint_name)
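
# A minimal sketch (illustrative helper, not part of the script above) of the
# MIMO input construction used in train_step: each of the `ensemble_size`
# subnetworks sees an independently shuffled copy of the batch, stacked along
# a new axis so the model input is [batch, ensemble_size, H, W, C]. The full
# script additionally mixes in batch repetition and partial input sharing.
def _mimo_batch_sketch(images, labels, ensemble_size):
  batch_size = tf.shape(images)[0]
  shuffle_indices = [
      tf.random.shuffle(tf.range(batch_size)) for _ in range(ensemble_size)
  ]
  images = tf.stack(
      [tf.gather(images, idx, axis=0) for idx in shuffle_indices], axis=1)
  labels = tf.stack(
      [tf.gather(labels, idx, axis=0) for idx in shuffle_indices], axis=1)
  return images, labels  # images: [B, ensemble_size, H, W, C]
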
def main(argv): del argv # unused arg tf.io.gfile.makedirs(FLAGS.output_dir) logging.info('Saving checkpoints at %s', FLAGS.output_dir) tf.random.set_seed(FLAGS.seed) # Initialize distribution strategy on flag-specified accelerator strategy = utils.init_distribution_strategy(FLAGS.force_use_cpu, FLAGS.use_gpu, FLAGS.tpu) train_batch_size = FLAGS.train_batch_size * FLAGS.num_cores eval_batch_size = FLAGS.eval_batch_size * FLAGS.num_cores # As per the Kaggle challenge, we have split sizes: # train: 35,126 # validation: 10,906 (currently unused) # test: 42,670 ds_info = tfds.builder('diabetic_retinopathy_detection').info steps_per_epoch = ds_info.splits['train'].num_examples // train_batch_size steps_per_eval = ds_info.splits['test'].num_examples // eval_batch_size dataset_train_builder = ub.datasets.get('diabetic_retinopathy_detection', split='train', data_dir=FLAGS.data_dir) dataset_train = dataset_train_builder.load(batch_size=train_batch_size) dataset_train = strategy.experimental_distribute_dataset(dataset_train) dataset_test_builder = ub.datasets.get('diabetic_retinopathy_detection', split='test', data_dir=FLAGS.data_dir) dataset_test = dataset_test_builder.load(batch_size=eval_batch_size) dataset_test = strategy.experimental_distribute_dataset(dataset_test) if FLAGS.use_bfloat16: policy = tf.keras.mixed_precision.experimental.Policy('mixed_bfloat16') tf.keras.mixed_precision.experimental.set_policy(policy) summary_writer = tf.summary.create_file_writer( os.path.join(FLAGS.output_dir, 'summaries')) with strategy.scope(): logging.info('Building Keras ResNet-50 deterministic model.') model = ub.models.resnet50_deterministic( input_shape=utils.load_input_shape(dataset_train), num_classes=1) # binary classification task logging.info('Model input shape: %s', model.input_shape) logging.info('Model output shape: %s', model.output_shape) logging.info('Model number of weights: %s', model.count_params()) # Linearly scale learning rate and the decay epochs by vanilla settings. base_lr = (FLAGS.base_learning_rate * train_batch_size) / DEFAULT_TRAIN_BATCH_SIZE lr_decay_epochs = [ (int(start_epoch_str) * FLAGS.train_epochs) // DEFAULT_NUM_EPOCHS for start_epoch_str in FLAGS.lr_decay_epochs ] lr_schedule = utils.LearningRateSchedule( steps_per_epoch, base_lr, decay_ratio=FLAGS.lr_decay_ratio, decay_epochs=lr_decay_epochs, warmup_epochs=FLAGS.lr_warmup_epochs) optimizer = tf.keras.optimizers.SGD(lr_schedule, momentum=0.9, nesterov=True) metrics = { 'train/negative_log_likelihood': tf.keras.metrics.Mean(), 'train/accuracy': tf.keras.metrics.BinaryAccuracy(), 'train/loss': tf.keras.metrics.Mean(), # NLL + L2 'train/ece': um.ExpectedCalibrationError(num_bins=FLAGS.num_bins), 'test/negative_log_likelihood': tf.keras.metrics.Mean(), 'test/accuracy': tf.keras.metrics.BinaryAccuracy(), 'test/ece': um.ExpectedCalibrationError(num_bins=FLAGS.num_bins) } checkpoint = tf.train.Checkpoint(model=model, optimizer=optimizer) latest_checkpoint = tf.train.latest_checkpoint(FLAGS.output_dir) initial_epoch = 0 if latest_checkpoint: # checkpoint.restore must be within a strategy.scope() # so that optimizer slot variables are mirrored. checkpoint.restore(latest_checkpoint) logging.info('Loaded checkpoint %s', latest_checkpoint) initial_epoch = optimizer.iterations.numpy() // steps_per_epoch # Finally, define OOD metrics outside the accelerator scope for CPU eval. 
metrics.update({ 'train/auc': tf.keras.metrics.AUC(), 'test/auc': tf.keras.metrics.AUC() }) @tf.function def train_step(iterator): """Training step function.""" def step_fn(inputs): """Per-replica step function.""" images = inputs['features'] labels = inputs['labels'] with tf.GradientTape() as tape: logits = model(images, training=True) if FLAGS.use_bfloat16: logits = tf.cast(logits, tf.float32) negative_log_likelihood = tf.reduce_mean( tf.keras.losses.binary_crossentropy(y_true=tf.expand_dims( labels, axis=-1), y_pred=logits, from_logits=True)) l2_loss = sum(model.losses) loss = negative_log_likelihood + (FLAGS.l2 * l2_loss) # Scale the loss given the TPUStrategy will reduce sum all gradients. scaled_loss = loss / strategy.num_replicas_in_sync grads = tape.gradient(scaled_loss, model.trainable_variables) optimizer.apply_gradients(zip(grads, model.trainable_variables)) probs = tf.squeeze(tf.nn.sigmoid(logits)) metrics['train/ece'].update_state(labels, probs) metrics['train/loss'].update_state(loss) metrics['train/negative_log_likelihood'].update_state( negative_log_likelihood) metrics['train/accuracy'].update_state(labels, probs) metrics['train/auc'].update_state(labels, probs) strategy.run(step_fn, args=(next(iterator), )) @tf.function def test_step(iterator): """Evaluation step function.""" def step_fn(inputs): """Per-replica step function.""" images = inputs['features'] labels = inputs['labels'] logits = model(images, training=False) if FLAGS.use_bfloat16: logits = tf.cast(logits, tf.float32) negative_log_likelihood = tf.reduce_mean( tf.keras.losses.binary_crossentropy(y_true=tf.expand_dims( labels, axis=-1), y_pred=logits, from_logits=True)) probs = tf.squeeze(tf.nn.sigmoid(logits)) metrics['test/negative_log_likelihood'].update_state( negative_log_likelihood) metrics['test/accuracy'].update_state(labels, probs) metrics['test/auc'].update_state(labels, probs) metrics['test/ece'].update_state(labels, probs) strategy.run(step_fn, args=(next(iterator), )) metrics.update({'test/ms_per_example': tf.keras.metrics.Mean()}) start_time = time.time() for epoch in range(initial_epoch, FLAGS.train_epochs): train_iterator = iter(dataset_train) test_iterator = iter(dataset_test) logging.info('Starting to run epoch: %s', epoch + 1) for step in range(steps_per_epoch): train_step(train_iterator) current_step = epoch * steps_per_epoch + (step + 1) max_steps = steps_per_epoch * FLAGS.train_epochs time_elapsed = time.time() - start_time steps_per_sec = float(current_step) / time_elapsed eta_seconds = (max_steps - current_step) / steps_per_sec message = ('{:.1%} completion: epoch {:d}/{:d}. {:.1f} steps/s. ' 'ETA: {:.0f} min. 
Time elapsed: {:.0f} min'.format( current_step / max_steps, epoch + 1, FLAGS.train_epochs, steps_per_sec, eta_seconds / 60, time_elapsed / 60)) if step % 20 == 0: logging.info(message) for step in range(steps_per_eval): if step % 20 == 0: logging.info('Starting to run eval step %s of epoch: %s', step, epoch + 1) test_start_time = time.time() test_step(test_iterator) ms_per_example = (time.time() - test_start_time) * 1e6 / eval_batch_size metrics['test/ms_per_example'].update_state(ms_per_example) logging.info( 'Train Loss (NLL+L2): %.4f, Accuracy: %.2f%%, AUC: %.2f%%, ECE: %.2f%%', metrics['train/loss'].result(), metrics['train/accuracy'].result() * 100, metrics['train/auc'].result() * 100, metrics['train/ece'].result() * 100) logging.info( 'Test NLL: %.4f, Accuracy: %.2f%%, AUC: %.2f%%, ECE: %.2f%%', metrics['test/negative_log_likelihood'].result(), metrics['test/accuracy'].result() * 100, metrics['test/auc'].result() * 100, metrics['test/ece'].result() * 100) total_results = { name: metric.result() for name, metric in metrics.items() } with summary_writer.as_default(): for name, result in total_results.items(): tf.summary.scalar(name, result, step=epoch + 1) for metric in metrics.values(): metric.reset_states() if (FLAGS.checkpoint_interval > 0 and (epoch + 1) % FLAGS.checkpoint_interval == 0): checkpoint_name = checkpoint.save( os.path.join(FLAGS.output_dir, 'checkpoint')) logging.info('Saved checkpoint to %s', checkpoint_name) # TODO(nband): debug checkpointing # Also save Keras model, due to checkpoint.save issue keras_model_name = os.path.join(FLAGS.output_dir, f'keras_model_{epoch + 1}') model.save(keras_model_name) logging.info('Saved keras model to %s', keras_model_name) final_checkpoint_name = checkpoint.save( os.path.join(FLAGS.output_dir, 'checkpoint')) logging.info('Saved last checkpoint to %s', final_checkpoint_name) keras_model_name = os.path.join(FLAGS.output_dir, f'keras_model_{FLAGS.train_epochs}') model.save(keras_model_name) logging.info('Saved keras model to %s', keras_model_name)
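# --- Illustrative sketch (not part of the scripts in this file) ---
# The retinopathy script above rescales its learning-rate hyperparameters so
# that flag defaults tuned at one batch size / epoch budget transfer to
# another. The helper below reproduces that arithmetic; the constant values
# are assumptions for illustration only (the real DEFAULT_TRAIN_BATCH_SIZE
# and DEFAULT_NUM_EPOCHS are defined elsewhere in the script).

DEFAULT_TRAIN_BATCH_SIZE = 128  # assumed reference batch size
DEFAULT_NUM_EPOCHS = 90  # assumed reference epoch budget


def scale_lr_hyperparameters(base_learning_rate, train_batch_size,
                             train_epochs, lr_decay_epochs):
  # Learning rate scales linearly with the global batch size.
  base_lr = base_learning_rate * train_batch_size / DEFAULT_TRAIN_BATCH_SIZE
  # Decay milestones keep the same relative position in the schedule.
  decay_epochs = [
      int(start_epoch_str) * train_epochs // DEFAULT_NUM_EPOCHS
      for start_epoch_str in lr_decay_epochs
  ]
  return base_lr, decay_epochs


# scale_lr_hyperparameters(0.1, 256, 45, ['30', '60']) -> (0.2, [15, 30])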
def main(argv): del argv # unused arg tf.io.gfile.makedirs(FLAGS.output_dir) logging.info('Saving checkpoints at %s', FLAGS.output_dir) tf.random.set_seed(FLAGS.seed) if FLAGS.use_gpu: logging.info('Use GPU') strategy = tf.distribute.MirroredStrategy() else: logging.info('Use TPU at %s', FLAGS.tpu if FLAGS.tpu is not None else 'local') resolver = tf.distribute.cluster_resolver.TPUClusterResolver( tpu=FLAGS.tpu) tf.config.experimental_connect_to_cluster(resolver) tf.tpu.experimental.initialize_tpu_system(resolver) strategy = tf.distribute.TPUStrategy(resolver) ds_info = tfds.builder(FLAGS.dataset).info batch_size = FLAGS.per_core_batch_size * FLAGS.num_cores train_dataset_size = ds_info.splits['train'].num_examples steps_per_epoch = train_dataset_size // batch_size steps_per_eval = ds_info.splits['test'].num_examples // batch_size num_classes = ds_info.features['label'].num_classes train_dataset = ub.datasets.get( FLAGS.dataset, split=tfds.Split.TRAIN).load(batch_size=batch_size) clean_test_dataset = ub.datasets.get( FLAGS.dataset, split=tfds.Split.TEST).load(batch_size=batch_size) train_dataset = strategy.experimental_distribute_dataset(train_dataset) test_datasets = { 'clean': strategy.experimental_distribute_dataset(clean_test_dataset), } if FLAGS.corruptions_interval > 0: extra_kwargs = {} if FLAGS.dataset == 'cifar100': extra_kwargs['data_dir'] = FLAGS.cifar100_c_path corruption_types, _ = utils.load_corrupted_test_info(FLAGS.dataset) for corruption_type in corruption_types: for severity in range(1, 6): dataset = ub.datasets.get( f'{FLAGS.dataset}_corrupted', corruption_type=corruption_type, severity=severity, split=tfds.Split.TEST, **extra_kwargs).load(batch_size=batch_size) test_datasets[f'{corruption_type}_{severity}'] = ( strategy.experimental_distribute_dataset(dataset)) summary_writer = tf.summary.create_file_writer( os.path.join(FLAGS.output_dir, 'summaries')) with strategy.scope(): logging.info('Building ResNet model') model = ub.models.wide_resnet_variational( input_shape=ds_info.features['image'].shape, depth=28, width_multiplier=10, num_classes=num_classes, prior_stddev=FLAGS.prior_stddev, dataset_size=train_dataset_size, stddev_init=FLAGS.stddev_init) logging.info('Model input shape: %s', model.input_shape) logging.info('Model output shape: %s', model.output_shape) logging.info('Model number of weights: %s', model.count_params()) # Linearly scale learning rate and the decay epochs by vanilla settings. 
base_lr = FLAGS.base_learning_rate * batch_size / 128 lr_decay_epochs = [(int(start_epoch_str) * FLAGS.train_epochs) // 200 for start_epoch_str in FLAGS.lr_decay_epochs] lr_schedule = utils.LearningRateSchedule( steps_per_epoch, base_lr, decay_ratio=FLAGS.lr_decay_ratio, decay_epochs=lr_decay_epochs, warmup_epochs=FLAGS.lr_warmup_epochs) optimizer = tf.keras.optimizers.SGD(lr_schedule, momentum=0.9, nesterov=True) metrics = { 'train/negative_log_likelihood': tf.keras.metrics.Mean(), 'train/accuracy': tf.keras.metrics.SparseCategoricalAccuracy(), 'train/loss': tf.keras.metrics.Mean(), 'train/ece': um.ExpectedCalibrationError(num_bins=FLAGS.num_bins), 'train/kl': tf.keras.metrics.Mean(), 'train/kl_scale': tf.keras.metrics.Mean(), 'test/negative_log_likelihood': tf.keras.metrics.Mean(), 'test/accuracy': tf.keras.metrics.SparseCategoricalAccuracy(), 'test/ece': um.ExpectedCalibrationError(num_bins=FLAGS.num_bins), } if FLAGS.corruptions_interval > 0: corrupt_metrics = {} for intensity in range(1, 6): for corruption in corruption_types: dataset_name = '{0}_{1}'.format(corruption, intensity) corrupt_metrics['test/nll_{}'.format(dataset_name)] = ( tf.keras.metrics.Mean()) corrupt_metrics['test/accuracy_{}'.format( dataset_name)] = ( tf.keras.metrics.SparseCategoricalAccuracy()) corrupt_metrics['test/ece_{}'.format(dataset_name)] = ( um.ExpectedCalibrationError(num_bins=FLAGS.num_bins)) checkpoint = tf.train.Checkpoint(model=model, optimizer=optimizer) latest_checkpoint = tf.train.latest_checkpoint(FLAGS.output_dir) initial_epoch = 0 if latest_checkpoint: # checkpoint.restore must be within a strategy.scope() so that optimizer # slot variables are mirrored. checkpoint.restore(latest_checkpoint) logging.info('Loaded checkpoint %s', latest_checkpoint) initial_epoch = optimizer.iterations.numpy() // steps_per_epoch @tf.function def train_step(iterator): """Training StepFn.""" def step_fn(inputs): """Per-Replica StepFn.""" images = inputs['features'] labels = inputs['labels'] with tf.GradientTape() as tape: logits = model(images, training=True) negative_log_likelihood = tf.reduce_mean( tf.keras.losses.sparse_categorical_crossentropy( labels, logits, from_logits=True)) filtered_variables = [] for var in model.trainable_variables: # Apply l2 on the BN parameters and bias terms. This # excludes only fast weight approximate posterior/prior parameters, # but pay caution to their naming scheme. if 'batch_norm' in var.name or 'bias' in var.name: filtered_variables.append(tf.reshape(var, (-1, ))) l2_loss = FLAGS.l2 * 2 * tf.nn.l2_loss( tf.concat(filtered_variables, axis=0)) kl = sum(model.losses) kl_scale = tf.cast(optimizer.iterations + 1, kl.dtype) kl_scale /= steps_per_epoch * FLAGS.kl_annealing_epochs kl_scale = tf.minimum(1., kl_scale) kl_loss = kl_scale * kl # Scale the loss given the TPUStrategy will reduce sum all gradients. 
loss = negative_log_likelihood + l2_loss + kl_loss scaled_loss = loss / strategy.num_replicas_in_sync grads = tape.gradient(scaled_loss, model.trainable_variables) optimizer.apply_gradients(zip(grads, model.trainable_variables)) probs = tf.nn.softmax(logits) metrics['train/ece'].update_state(labels, probs) metrics['train/loss'].update_state(loss) metrics['train/negative_log_likelihood'].update_state( negative_log_likelihood) metrics['train/kl'].update_state(kl) metrics['train/kl_scale'].update_state(kl_scale) metrics['train/accuracy'].update_state(labels, logits) strategy.run(step_fn, args=(next(iterator), )) @tf.function def test_step(iterator, dataset_name): """Evaluation StepFn.""" def step_fn(inputs): """Per-Replica StepFn.""" images = inputs['features'] labels = inputs['labels'] # TODO(trandustin): Use more eval samples only on corrupted predictions; # it's expensive but a one-time compute if scheduled post-training. if FLAGS.num_eval_samples > 1 and dataset_name != 'clean': logits = tf.stack([ model(images, training=False) for _ in range(FLAGS.num_eval_samples) ], axis=0) else: logits = model(images, training=False) probs = tf.nn.softmax(logits) if FLAGS.num_eval_samples > 1 and dataset_name != 'clean': probs = tf.reduce_mean(probs, axis=0) negative_log_likelihood = tf.reduce_mean( tf.keras.losses.sparse_categorical_crossentropy(labels, probs)) if dataset_name == 'clean': metrics['test/negative_log_likelihood'].update_state( negative_log_likelihood) metrics['test/accuracy'].update_state(labels, probs) metrics['test/ece'].update_state(labels, probs) else: corrupt_metrics['test/nll_{}'.format( dataset_name)].update_state(negative_log_likelihood) corrupt_metrics['test/accuracy_{}'.format( dataset_name)].update_state(labels, probs) corrupt_metrics['test/ece_{}'.format( dataset_name)].update_state(labels, probs) strategy.run(step_fn, args=(next(iterator), )) metrics.update({'test/ms_per_example': tf.keras.metrics.Mean()}) train_iterator = iter(train_dataset) start_time = time.time() for epoch in range(initial_epoch, FLAGS.train_epochs): logging.info('Starting to run epoch: %s', epoch) for step in range(steps_per_epoch): train_step(train_iterator) current_step = epoch * steps_per_epoch + (step + 1) max_steps = steps_per_epoch * FLAGS.train_epochs time_elapsed = time.time() - start_time steps_per_sec = float(current_step) / time_elapsed eta_seconds = (max_steps - current_step) / steps_per_sec message = ('{:.1%} completion: epoch {:d}/{:d}. {:.1f} steps/s. ' 'ETA: {:.0f} min. 
Time elapsed: {:.0f} min'.format( current_step / max_steps, epoch + 1, FLAGS.train_epochs, steps_per_sec, eta_seconds / 60, time_elapsed / 60)) if step % 20 == 0: logging.info(message) datasets_to_evaluate = {'clean': test_datasets['clean']} if (FLAGS.corruptions_interval > 0 and (epoch + 1) % FLAGS.corruptions_interval == 0): datasets_to_evaluate = test_datasets for dataset_name, test_dataset in datasets_to_evaluate.items(): test_iterator = iter(test_dataset) logging.info('Testing on dataset %s', dataset_name) for step in range(steps_per_eval): if step % 20 == 0: logging.info('Starting to run eval step %s of epoch: %s', step, epoch) test_start_time = time.time() test_step(test_iterator, dataset_name) ms_per_example = (time.time() - test_start_time) * 1e6 / batch_size metrics['test/ms_per_example'].update_state(ms_per_example) logging.info('Done with testing on %s', dataset_name) corrupt_results = {} if (FLAGS.corruptions_interval > 0 and (epoch + 1) % FLAGS.corruptions_interval == 0): corrupt_results = utils.aggregate_corrupt_metrics( corrupt_metrics, corruption_types) logging.info('Train Loss: %.4f, Accuracy: %.2f%%', metrics['train/loss'].result(), metrics['train/accuracy'].result() * 100) logging.info('Test NLL: %.4f, Accuracy: %.2f%%', metrics['test/negative_log_likelihood'].result(), metrics['test/accuracy'].result() * 100) total_results = { name: metric.result() for name, metric in metrics.items() } total_results.update(corrupt_results) with summary_writer.as_default(): for name, result in total_results.items(): tf.summary.scalar(name, result, step=epoch + 1) for metric in metrics.values(): metric.reset_states() if (FLAGS.checkpoint_interval > 0 and (epoch + 1) % FLAGS.checkpoint_interval == 0): checkpoint_name = checkpoint.save( os.path.join(FLAGS.output_dir, 'checkpoint')) logging.info('Saved checkpoint to %s', checkpoint_name) final_checkpoint_name = checkpoint.save( os.path.join(FLAGS.output_dir, 'checkpoint')) logging.info('Saved last checkpoint to %s', final_checkpoint_name)
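# --- Illustrative sketch (not part of the scripts in this file) ---
# The variational train_step above anneals the KL term linearly from ~0 up to
# its full weight over FLAGS.kl_annealing_epochs, which keeps the likelihood
# term dominant early in training. A minimal standalone version:

import tensorflow as tf


def annealed_kl_scale(step, steps_per_epoch, kl_annealing_epochs):
  """Linear ramp: (step + 1) / (total annealing steps), capped at 1."""
  scale = tf.cast(step + 1, tf.float32)
  scale /= float(steps_per_epoch * kl_annealing_epochs)
  return tf.minimum(1.0, scale)


# With 100 steps per epoch and 5 annealing epochs, the KL term reaches full
# weight at step 500: annealed_kl_scale(49, 100, 5) -> 0.1 and
# annealed_kl_scale(999, 100, 5) -> 1.0.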
def main(argv): del argv # unused arg tf.io.gfile.makedirs(FLAGS.output_dir) logging.info('Saving checkpoints at %s', FLAGS.output_dir) tf.random.set_seed(FLAGS.seed) ds_info = tfds.builder(FLAGS.dataset).info per_core_batch_size = FLAGS.per_core_batch_size // FLAGS.ensemble_size batch_size = per_core_batch_size * FLAGS.num_cores # Train_proportion is a float so need to convert steps_per_epoch to int. steps_per_epoch = int( (ds_info.splits['train'].num_examples * FLAGS.train_proportion) // batch_size) steps_per_eval = ds_info.splits['test'].num_examples // batch_size num_classes = ds_info.features['label'].num_classes if FLAGS.use_gpu: logging.info('Use GPU') strategy = tf.distribute.MirroredStrategy() else: logging.info('Use TPU at %s', FLAGS.tpu if FLAGS.tpu is not None else 'local') resolver = tf.distribute.cluster_resolver.TPUClusterResolver( tpu=FLAGS.tpu) tf.config.experimental_connect_to_cluster(resolver) tf.tpu.experimental.initialize_tpu_system(resolver) strategy = tf.distribute.TPUStrategy(resolver) train_dataset = ub.datasets.get( FLAGS.dataset, split=tfds.Split.TRAIN, validation_percent=1. - FLAGS.train_proportion).load(batch_size=batch_size) clean_test_dataset = ub.datasets.get( FLAGS.dataset, split=tfds.Split.TEST).load(batch_size=batch_size) train_dataset = strategy.experimental_distribute_dataset(train_dataset) test_datasets = { 'clean': strategy.experimental_distribute_dataset(clean_test_dataset), } if FLAGS.corruptions_interval > 0: extra_kwargs = {} if FLAGS.dataset == 'cifar100': extra_kwargs['data_dir'] = FLAGS.cifar100_c_path corruption_types, _ = utils.load_corrupted_test_info(FLAGS.dataset) for corruption_type in corruption_types: for severity in range(1, 6): dataset = ub.datasets.get( f'{FLAGS.dataset}_corrupted', corruption_type=corruption_type, severity=severity, split=tfds.Split.TEST, **extra_kwargs).load(batch_size=batch_size) test_datasets[f'{corruption_type}_{severity}'] = ( strategy.experimental_distribute_dataset(dataset)) summary_writer = tf.summary.create_file_writer( os.path.join(FLAGS.output_dir, 'summaries')) with strategy.scope(): logging.info('Building Keras model') model = ub.models.wide_resnet_sngp_be( input_shape=ds_info.features['image'].shape, batch_size=batch_size, depth=28, width_multiplier=10, num_classes=num_classes, ensemble_size=FLAGS.ensemble_size, random_sign_init=FLAGS.random_sign_init, l2=FLAGS.l2, use_gp_layer=FLAGS.use_gp_layer, gp_input_dim=FLAGS.gp_input_dim, gp_hidden_dim=FLAGS.gp_hidden_dim, gp_scale=FLAGS.gp_scale, gp_bias=FLAGS.gp_bias, gp_input_normalization=FLAGS.gp_input_normalization, gp_cov_discount_factor=FLAGS.gp_cov_discount_factor, gp_cov_ridge_penalty=FLAGS.gp_cov_ridge_penalty, use_spec_norm=FLAGS.use_spec_norm, spec_norm_iteration=FLAGS.spec_norm_iteration, spec_norm_bound=FLAGS.spec_norm_bound) logging.info('Model input shape: %s', model.input_shape) logging.info('Model output shape: %s', model.output_shape) logging.info('Model number of weights: %s', model.count_params()) # Linearly scale learning rate and the decay epochs by vanilla settings. 
base_lr = FLAGS.base_learning_rate * batch_size / 128 lr_decay_epochs = [(int(start_epoch_str) * FLAGS.train_epochs) // 200 for start_epoch_str in FLAGS.lr_decay_epochs] lr_schedule = utils.LearningRateSchedule( steps_per_epoch, base_lr, decay_ratio=FLAGS.lr_decay_ratio, decay_epochs=lr_decay_epochs, warmup_epochs=FLAGS.lr_warmup_epochs) optimizer = tf.keras.optimizers.SGD(lr_schedule, momentum=0.9, nesterov=True) metrics = { 'train/negative_log_likelihood': tf.keras.metrics.Mean(), 'train/accuracy': tf.keras.metrics.SparseCategoricalAccuracy(), 'train/loss': tf.keras.metrics.Mean(), 'train/ece': um.ExpectedCalibrationError(num_bins=FLAGS.num_bins), 'test/negative_log_likelihood': tf.keras.metrics.Mean(), 'test/accuracy': tf.keras.metrics.SparseCategoricalAccuracy(), 'test/ece': um.ExpectedCalibrationError(num_bins=FLAGS.num_bins), 'test/stddev': tf.keras.metrics.Mean(), } for i in range(FLAGS.ensemble_size): metrics['test/nll_member_{}'.format(i)] = tf.keras.metrics.Mean() metrics['test/accuracy_member_{}'.format(i)] = ( tf.keras.metrics.SparseCategoricalAccuracy()) if FLAGS.corruptions_interval > 0: corrupt_metrics = {} for intensity in range(1, 6): for corruption in corruption_types: dataset_name = '{0}_{1}'.format(corruption, intensity) corrupt_metrics['test/nll_{}'.format(dataset_name)] = ( tf.keras.metrics.Mean()) corrupt_metrics['test/accuracy_{}'.format( dataset_name)] = ( tf.keras.metrics.SparseCategoricalAccuracy()) corrupt_metrics['test/ece_{}'.format(dataset_name)] = ( um.ExpectedCalibrationError(num_bins=FLAGS.num_bins)) corrupt_metrics['test/stddev_{}'.format(dataset_name)] = ( tf.keras.metrics.Mean()) checkpoint = tf.train.Checkpoint(model=model, optimizer=optimizer) latest_checkpoint = tf.train.latest_checkpoint(FLAGS.output_dir) initial_epoch = 0 if latest_checkpoint: # checkpoint.restore must be within a strategy.scope() so that optimizer # slot variables are mirrored. checkpoint.restore(latest_checkpoint) logging.info('Loaded checkpoint %s', latest_checkpoint) initial_epoch = optimizer.iterations.numpy() // steps_per_epoch @tf.function def train_step(iterator): """Training StepFn.""" def step_fn(inputs): """Per-Replica StepFn.""" images = inputs['features'] labels = inputs['labels'] images = tf.tile(images, [FLAGS.ensemble_size, 1, 1, 1]) labels = tf.tile(labels, [FLAGS.ensemble_size]) with tf.GradientTape() as tape: logits = model(images, training=True) if isinstance(logits, tuple): # If model returns a tuple of (logits, covmat), extract logits logits, _ = logits negative_log_likelihood = tf.reduce_mean( tf.keras.losses.sparse_categorical_crossentropy( labels, logits, from_logits=True)) l2_loss = sum(model.losses) loss = negative_log_likelihood + l2_loss # Scale the loss given the TPUStrategy will reduce sum all gradients. scaled_loss = loss / strategy.num_replicas_in_sync grads = tape.gradient(scaled_loss, model.trainable_variables) # Separate learning rate implementation. if FLAGS.fast_weight_lr_multiplier != 1.0: grads_and_vars = [] for grad, var in zip(grads, model.trainable_variables): # Apply different learning rate on the fast weight approximate # posterior/prior parameters. This is excludes BN and slow weights, # but pay caution to the naming scheme. 
if ('batch_norm' not in var.name and 'kernel' not in var.name): grads_and_vars.append( (grad * FLAGS.fast_weight_lr_multiplier, var)) else: grads_and_vars.append((grad, var)) optimizer.apply_gradients(grads_and_vars) else: optimizer.apply_gradients(zip(grads, model.trainable_variables)) probs = tf.nn.softmax(logits) metrics['train/ece'].update_state(labels, probs) metrics['train/loss'].update_state(loss) metrics['train/negative_log_likelihood'].update_state( negative_log_likelihood) metrics['train/accuracy'].update_state(labels, logits) strategy.run(step_fn, args=(next(iterator), )) @tf.function def test_step(iterator, dataset_name): """Evaluation StepFn.""" def step_fn(inputs): """Per-Replica StepFn.""" images = inputs['features'] labels = inputs['labels'] logits_list = [] stddev_list = [] for i in range(FLAGS.ensemble_size): logits = model(images, training=False) if isinstance(logits, tuple): # If model returns a tuple of (logits, covmat), extract both logits, covmat = logits else: covmat = tf.eye(FLAGS.per_core_batch_size) logits = mean_field_logits( logits, covmat, mean_field_factor=FLAGS.gp_mean_field_factor) stddev = tf.sqrt(tf.linalg.diag_part(covmat)) stddev_list.append(stddev) logits_list.append(logits) member_probs = tf.nn.softmax(logits) member_loss = tf.keras.losses.sparse_categorical_crossentropy( labels, member_probs) metrics['test/nll_member_{}'.format(i)].update_state( member_loss) metrics['test/accuracy_member_{}'.format(i)].update_state( labels, member_probs) # Logits dimension is (num_samples, batch_size, num_classes). logits_list = tf.stack(logits_list, axis=0) stddev_list = tf.stack(stddev_list, axis=0) stddev = tf.reduce_mean(stddev_list, axis=0) probs_list = tf.nn.softmax(logits_list) probs = tf.reduce_mean(probs_list, axis=0) labels_broadcasted = tf.broadcast_to( labels, [FLAGS.ensemble_size, labels.shape[0]]) log_likelihoods = -tf.keras.losses.sparse_categorical_crossentropy( labels_broadcasted, logits_list, from_logits=True) negative_log_likelihood = tf.reduce_mean( -tf.reduce_logsumexp(log_likelihoods, axis=[0]) + tf.math.log(float(FLAGS.ensemble_size))) if dataset_name == 'clean': metrics['test/negative_log_likelihood'].update_state( negative_log_likelihood) metrics['test/accuracy'].update_state(labels, probs) metrics['test/ece'].update_state(labels, probs) metrics['test/stddev'].update_state(stddev) else: corrupt_metrics['test/nll_{}'.format( dataset_name)].update_state(negative_log_likelihood) corrupt_metrics['test/accuracy_{}'.format( dataset_name)].update_state(labels, probs) corrupt_metrics['test/ece_{}'.format( dataset_name)].update_state(labels, probs) corrupt_metrics['test/stddev_{}'.format( dataset_name)].update_state(stddev) strategy.run(step_fn, args=(next(iterator), )) metrics.update({'test/ms_per_example': tf.keras.metrics.Mean()}) train_iterator = iter(train_dataset) start_time = time.time() for epoch in range(initial_epoch, FLAGS.train_epochs): logging.info('Starting to run epoch: %s', epoch) for step in range(steps_per_epoch): train_step(train_iterator) current_step = epoch * steps_per_epoch + (step + 1) max_steps = steps_per_epoch * FLAGS.train_epochs time_elapsed = time.time() - start_time steps_per_sec = float(current_step) / time_elapsed eta_seconds = (max_steps - current_step) / steps_per_sec message = ('{:.1%} completion: epoch {:d}/{:d}. {:.1f} steps/s. ' 'ETA: {:.0f} min. 
Time elapsed: {:.0f} min'.format( current_step / max_steps, epoch + 1, FLAGS.train_epochs, steps_per_sec, eta_seconds / 60, time_elapsed / 60)) if step % 20 == 0: logging.info(message) datasets_to_evaluate = {'clean': test_datasets['clean']} if (FLAGS.corruptions_interval > 0 and (epoch + 1) % FLAGS.corruptions_interval == 0): datasets_to_evaluate = test_datasets for dataset_name, test_dataset in datasets_to_evaluate.items(): test_iterator = iter(test_dataset) logging.info('Testing on dataset %s', dataset_name) for step in range(steps_per_eval): if step % 20 == 0: logging.info('Starting to run eval step %s of epoch: %s', step, epoch) test_start_time = time.time() test_step(test_iterator, dataset_name) ms_per_example = (time.time() - test_start_time) * 1e6 / batch_size metrics['test/ms_per_example'].update_state(ms_per_example) logging.info('Done with testing on %s', dataset_name) corrupt_results = {} if (FLAGS.corruptions_interval > 0 and (epoch + 1) % FLAGS.corruptions_interval == 0): corrupt_results = utils.aggregate_corrupt_metrics( corrupt_metrics, corruption_types) logging.info('Train Loss: %.4f, Accuracy: %.2f%%', metrics['train/loss'].result(), metrics['train/accuracy'].result() * 100) logging.info('Test NLL: %.4f, Accuracy: %.2f%%', metrics['test/negative_log_likelihood'].result(), metrics['test/accuracy'].result() * 100) for i in range(FLAGS.ensemble_size): logging.info( 'Member %d Test Loss: %.4f, Accuracy: %.2f%%', i, metrics['test/nll_member_{}'.format(i)].result(), metrics['test/accuracy_member_{}'.format(i)].result() * 100) total_results = { name: metric.result() for name, metric in metrics.items() } total_results.update(corrupt_results) with summary_writer.as_default(): for name, result in total_results.items(): tf.summary.scalar(name, result, step=epoch + 1) for metric in metrics.values(): metric.reset_states() if (FLAGS.checkpoint_interval > 0 and (epoch + 1) % FLAGS.checkpoint_interval == 0): checkpoint_name = checkpoint.save( os.path.join(FLAGS.output_dir, 'checkpoint')) logging.info('Saved checkpoint to %s', checkpoint_name) final_checkpoint_name = checkpoint.save( os.path.join(FLAGS.output_dir, 'checkpoint')) logging.info('Saved last checkpoint to %s', final_checkpoint_name)
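# --- Illustrative sketch (not part of the scripts in this file) ---
# The SNGP test_step above adjusts GP logits with mean_field_logits before
# the softmax. Under the mean-field approximation, each example's logits are
# divided by sqrt(1 + lambda * sigma^2), where sigma^2 is the GP posterior
# variance (the diagonal of covmat) and lambda is FLAGS.gp_mean_field_factor.
# A minimal version under that assumption:

import tensorflow as tf


def mean_field_logits_sketch(logits, covmat, mean_field_factor=1.0):
  """Scales [batch, num_classes] logits by the per-example GP variance."""
  variances = tf.linalg.diag_part(covmat)  # [batch]
  scale = tf.sqrt(1.0 + mean_field_factor * variances)
  return logits / tf.expand_dims(scale, axis=-1)


# A variance of 3 with factor 1 halves the logits:
# mean_field_logits_sketch(tf.constant([[4.0, 0.0]]),
#                          tf.constant([[3.0]])) -> [[2.0, 0.0]]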
def main(argv): del argv # unused arg tf.enable_v2_behavior() tf.random.set_seed(FLAGS.seed) batch_size = FLAGS.per_core_batch_size * FLAGS.num_cores steps_per_epoch = APPROX_IMAGENET_TRAIN_IMAGES // batch_size steps_per_eval = IMAGENET_VALIDATION_IMAGES // batch_size logging.info('Saving checkpoints at %s', FLAGS.output_dir) if FLAGS.use_gpu: logging.info('Use GPU') strategy = tf.distribute.MirroredStrategy() else: logging.info('Use TPU at %s', FLAGS.tpu if FLAGS.tpu is not None else 'local') resolver = tf.distribute.cluster_resolver.TPUClusterResolver( tpu=FLAGS.tpu) tf.config.experimental_connect_to_cluster(resolver) tf.tpu.experimental.initialize_tpu_system(resolver) strategy = tf.distribute.experimental.TPUStrategy(resolver) imagenet_train = utils.ImageNetInput(is_training=True, data_dir=FLAGS.data_dir, batch_size=batch_size, use_bfloat16=not FLAGS.use_gpu, drop_remainder=True) imagenet_eval = utils.ImageNetInput(is_training=False, data_dir=FLAGS.data_dir, batch_size=batch_size, use_bfloat16=not FLAGS.use_gpu, drop_remainder=True) train_dataset = strategy.experimental_distribute_dataset( imagenet_train.input_fn()) test_dataset = strategy.experimental_distribute_dataset( imagenet_eval.input_fn()) if FLAGS.use_bfloat16: policy = tf.keras.mixed_precision.experimental.Policy('mixed_bfloat16') tf.keras.mixed_precision.experimental.set_policy(policy) with strategy.scope(): logging.info('Building Keras ResNet-50 model') model = deterministic_model.resnet50(input_shape=(224, 224, 3), num_classes=NUM_CLASSES) logging.info('Model input shape: %s', model.input_shape) logging.info('Model output shape: %s', model.output_shape) logging.info('Model number of weights: %s', model.count_params()) # Scale learning rate and decay epochs by vanilla settings. base_lr = FLAGS.base_learning_rate * batch_size / 256 learning_rate = utils.LearningRateSchedule(steps_per_epoch, base_lr, FLAGS.train_epochs, _LR_SCHEDULE) optimizer = tf.keras.optimizers.SGD(learning_rate=learning_rate, momentum=0.9, nesterov=True) train_loss = tf.keras.metrics.Mean('train_loss', dtype=tf.float32) train_nll = tf.keras.metrics.Mean('train_nll', dtype=tf.float32) train_accuracy = tf.keras.metrics.SparseCategoricalAccuracy( 'train_accuracy', dtype=tf.float32) test_nll = tf.keras.metrics.Mean('test_nll', dtype=tf.float32) test_accuracy = tf.keras.metrics.SparseCategoricalAccuracy( 'test_accuracy', dtype=tf.float32) logging.info('Finished building Keras ResNet-50 model') checkpoint = tf.train.Checkpoint(model=model, optimizer=optimizer) latest_checkpoint = tf.train.latest_checkpoint(FLAGS.output_dir) initial_epoch = 0 if latest_checkpoint: # checkpoint.restore must be within a strategy.scope() so that optimizer # slot variables are mirrored. checkpoint.restore(latest_checkpoint) logging.info('Loaded checkpoint %s', latest_checkpoint) initial_epoch = optimizer.iterations.numpy() // steps_per_epoch summary_writer = tf.summary.create_file_writer( os.path.join(FLAGS.output_dir, 'summaries')) @tf.function def train_step(iterator): """Training StepFn.""" def step_fn(inputs): """Per-Replica StepFn.""" images, labels = inputs with tf.GradientTape() as tape: predictions = model(images, training=True) if FLAGS.use_bfloat16: predictions = tf.cast(predictions, tf.float32) prediction_loss = tf.keras.losses.sparse_categorical_crossentropy( labels, predictions) loss1 = tf.reduce_mean(prediction_loss) filtered_variables = [] for var in model.trainable_variables: # Apply l2 on the weights. 
This excludes BN parameters and biases, but # pay caution to their naming scheme. if 'kernel' in var.name or 'bias' in var.name: filtered_variables.append(tf.reshape(var, (-1, ))) l2_loss = FLAGS.l2 * 2 * tf.nn.l2_loss( tf.concat(filtered_variables, axis=0)) # Scale the loss given the TPUStrategy will reduce sum all gradients. loss = loss1 + l2_loss scaled_loss = loss / strategy.num_replicas_in_sync grads = tape.gradient(scaled_loss, model.trainable_variables) optimizer.apply_gradients(zip(grads, model.trainable_variables)) train_loss.update_state(loss) train_nll.update_state(loss1) train_accuracy.update_state(labels, predictions) strategy.experimental_run_v2(step_fn, args=(next(iterator), )) @tf.function def test_step(iterator): """Evaluation StepFn.""" def step_fn(inputs): """Per-Replica StepFn.""" images, labels = inputs predictions = model(images, training=False) if FLAGS.use_bfloat16: predictions = tf.cast(predictions, tf.float32) loss = tf.keras.losses.sparse_categorical_crossentropy( labels, predictions) loss = safe_mean(loss) test_nll.update_state(loss) test_accuracy.update_state(labels, predictions) strategy.experimental_run_v2(step_fn, args=(next(iterator), )) train_iterator = iter(train_dataset) start_time = time.time() for epoch in range(initial_epoch, FLAGS.train_epochs): logging.info('Starting to run epoch: %s', epoch) with summary_writer.as_default(): for step in range(steps_per_epoch): train_step(train_iterator) current_step = epoch * steps_per_epoch + (step + 1) max_steps = steps_per_epoch * FLAGS.train_epochs time_elapsed = time.time() - start_time steps_per_sec = float(current_step) / time_elapsed eta_seconds = (max_steps - current_step) / steps_per_sec message = ( '{:.1%} completion: epoch {:d}/{:d}. {:.1f} steps/s. ' 'ETA: {:.0f} min. Time elapsed: {:.0f} min'.format( current_step / max_steps, epoch + 1, FLAGS.train_epochs, steps_per_sec, eta_seconds / 60, time_elapsed / 60)) if step % 20 == 0: logging.info(message) tf.summary.scalar('train/loss', train_loss.result(), step=epoch + 1) tf.summary.scalar('train/nll', train_nll.result(), step=epoch + 1) tf.summary.scalar('train/accuracy', train_accuracy.result(), step=epoch + 1) logging.info('Train loss: %s, Accuracy: %s%%', round(float(train_loss.result()), 4), round(float(train_accuracy.result() * 100), 2)) train_loss.reset_states() train_nll.reset_states() train_accuracy.reset_states() test_iterator = iter(test_dataset) for step in range(steps_per_eval): if step % 20 == 0: logging.info('Starting to run eval step %s of epoch: %s', step, epoch) test_step(test_iterator) tf.summary.scalar('test/negative_log_likelihood', test_nll.result(), step=epoch + 1) tf.summary.scalar('test/accuracy', test_accuracy.result(), step=epoch + 1) logging.info('Test NLL: %s, Accuracy: %s%%', round(float(test_nll.result()), 4), round(float(test_accuracy.result() * 100), 2)) test_nll.reset_states() test_accuracy.reset_states() checkpoint_name = checkpoint.save( os.path.join(FLAGS.output_dir, 'checkpoint')) logging.info('Saved checkpoint to %s', checkpoint_name)
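# --- Illustrative sketch (not part of the scripts in this file) ---
# Several scripts here build the weight-decay term as
# FLAGS.l2 * 2 * tf.nn.l2_loss(concat(filtered_variables)). Since
# tf.nn.l2_loss(t) = sum(t**2) / 2, the factor of 2 recovers a plain
# l2 * sum-of-squares over every variable whose name contains 'kernel' or
# 'bias'. A standalone check of that identity:

import tensorflow as tf


def l2_penalty(variables, l2_coeff):
  filtered = [
      tf.reshape(v, (-1,)) for v in variables
      if 'kernel' in v.name or 'bias' in v.name
  ]
  return l2_coeff * 2 * tf.nn.l2_loss(tf.concat(filtered, axis=0))


# Both expressions below equal 14.0, i.e. 1^2 + 2^2 + 3^2.
x = tf.constant([1.0, 2.0, 3.0])
assert float(2 * tf.nn.l2_loss(x)) == float(tf.reduce_sum(x ** 2))
# l2_penalty([tf.Variable([1., 2., 3.], name='w_kernel')], 1e-4) -> 0.0014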
def main(argv): del argv # unused arg tf.io.gfile.makedirs(FLAGS.output_dir) logging.info('Saving checkpoints at %s', FLAGS.output_dir) tf.random.set_seed(FLAGS.seed) if FLAGS.use_gpu: logging.info('Use GPU') strategy = tf.distribute.MirroredStrategy() else: logging.info('Use TPU at %s', FLAGS.tpu if FLAGS.tpu is not None else 'local') resolver = tf.distribute.cluster_resolver.TPUClusterResolver( tpu=FLAGS.tpu) tf.config.experimental_connect_to_cluster(resolver) tf.tpu.experimental.initialize_tpu_system(resolver) strategy = tf.distribute.experimental.TPUStrategy(resolver) per_core_batch_size = FLAGS.per_core_batch_size // FLAGS.ensemble_size assert 0 < FLAGS.train_proportion <= 1, ('train_proportion must satisfy 0 < train_proportion <= 1.') drop_remainder_validation = True if not FLAGS.use_gpu: # This has to be True for TPU training, otherwise the batch size of images # in the validation set can't be determined at TPU compile time. assert drop_remainder_validation, 'drop_remainder must be True in TPU mode.' train_input_fn = utils.load_input_fn(split=tfds.Split.TRAIN, name=FLAGS.dataset, batch_size=per_core_batch_size, use_bfloat16=FLAGS.use_bfloat16, repeat=True, proportion=FLAGS.train_proportion) validation_proportion = 1 - FLAGS.train_proportion validation_input_fn = utils.load_input_fn( split=tfds.Split.VALIDATION, name=FLAGS.dataset, batch_size=per_core_batch_size, use_bfloat16=FLAGS.use_bfloat16, repeat=True, proportion=validation_proportion, drop_remainder=drop_remainder_validation) clean_test_input_fn = utils.load_input_fn(split=tfds.Split.TEST, name=FLAGS.dataset, batch_size=per_core_batch_size, use_bfloat16=FLAGS.use_bfloat16) train_dataset = strategy.experimental_distribute_datasets_from_function( train_input_fn) validation_dataset = strategy.experimental_distribute_datasets_from_function( validation_input_fn) test_datasets = { 'clean': strategy.experimental_distribute_datasets_from_function( clean_test_input_fn), } if FLAGS.corruptions_interval > 0: if FLAGS.dataset == 'cifar10': load_c_input_fn = utils.load_cifar10_c_input_fn else: load_c_input_fn = functools.partial(utils.load_cifar100_c_input_fn, path=FLAGS.cifar100_c_path) corruption_types, max_intensity = utils.load_corrupted_test_info( FLAGS.dataset) for corruption in corruption_types: for intensity in range(1, max_intensity + 1): input_fn = load_c_input_fn(corruption_name=corruption, corruption_intensity=intensity, batch_size=per_core_batch_size, use_bfloat16=FLAGS.use_bfloat16) test_datasets['{0}_{1}'.format(corruption, intensity)] = ( strategy.experimental_distribute_datasets_from_function( input_fn)) ds_info = tfds.builder(FLAGS.dataset).info batch_size = per_core_batch_size * FLAGS.num_cores train_sample_size = ds_info.splits[ 'train'].num_examples * FLAGS.train_proportion steps_per_epoch = int(train_sample_size / batch_size) train_sample_size = int(train_sample_size) steps_per_eval = ds_info.splits['test'].num_examples // batch_size num_classes = ds_info.features['label'].num_classes if FLAGS.use_bfloat16: policy = tf.keras.mixed_precision.experimental.Policy('mixed_bfloat16') tf.keras.mixed_precision.experimental.set_policy(policy) summary_writer = tf.summary.create_file_writer( os.path.join(FLAGS.output_dir, 'summaries')) logging.info('Building Keras model.') depth = 28 width = 10 dict_ranges = {'min': FLAGS.min_l2_range, 'max': FLAGS.max_l2_range} ranges = [dict_ranges for _ in range(6)] # 6 independent l2 parameters model_config = { 'key_to_index': { 'input_conv_l2_kernel': 0, 
'group_l2_kernel': 1, 'group_1_l2_kernel': 2, 'group_2_l2_kernel': 3, 'dense_l2_kernel': 4, 'dense_l2_bias': 5, }, 'ranges': ranges, 'test': None } lambdas_config = LambdaConfig(model_config['ranges'], model_config['key_to_index']) if FLAGS.e_body_hidden_units > 0: e_body_arch = '({},)'.format(FLAGS.e_body_hidden_units) else: e_body_arch = '()' e_shared_arch = '()' e_activation = 'tanh' filters_resnet = [16] for i in range(0, 3): # 3 groups of blocks filters_resnet.extend([16 * width * 2**i] * 9) # 9 layers in each block # e_head dim for conv2d is just the number of filters (only # kernel) and twice num of classes for the last dense layer (kernel + bias) e_head_dims = [x for x in filters_resnet] + [2 * num_classes] with strategy.scope(): e_models = e_factory( lambdas_config.input_shape, e_head_dims=e_head_dims, e_body_arch=eval(e_body_arch), # pylint: disable=eval-used e_shared_arch=eval(e_shared_arch), # pylint: disable=eval-used activation=e_activation, use_bias=FLAGS.e_model_use_bias, e_head_init=FLAGS.init_emodels_stddev) model = wide_resnet_hyperbatchensemble( input_shape=ds_info.features['image'].shape, depth=depth, width_multiplier=width, num_classes=num_classes, ensemble_size=FLAGS.ensemble_size, random_sign_init=FLAGS.random_sign_init, config=lambdas_config, e_models=e_models, l2_batchnorm_layer=FLAGS.l2_batchnorm, regularize_fast_weights=FLAGS.regularize_fast_weights, fast_weights_eq_contraint=FLAGS.fast_weights_eq_contraint, version=2) logging.info('Model input shape: %s', model.input_shape) logging.info('Model output shape: %s', model.output_shape) logging.info('Model number of weights: %s', model.count_params()) # build hyper-batchensemble complete ------------------------- # Initialize Lambda distributions for tuning lambdas_mean = tf.reduce_mean( log_uniform_mean([lambdas_config.log_min, lambdas_config.log_max])) lambdas0 = tf.random.normal((FLAGS.ensemble_size, lambdas_config.dim), lambdas_mean, 0.1 * FLAGS.ens_init_delta_bounds) lower0 = lambdas0 - tf.constant(FLAGS.ens_init_delta_bounds) lower0 = tf.maximum(lower0, 1e-8) upper0 = lambdas0 + tf.constant(FLAGS.ens_init_delta_bounds) log_lower = tf.Variable(tf.math.log(lower0)) log_upper = tf.Variable(tf.math.log(upper0)) lambda_parameters = [log_lower, log_upper] # these variables are tuned clip_lambda_parameters(lambda_parameters, lambdas_config) # Optimizer settings to train model weights # Linearly scale learning rate and the decay epochs by vanilla settings. # Note: Here, we don't divide the epochs by 200 as for the other uncertainty # baselines. 
base_lr = FLAGS.base_learning_rate * batch_size / 128 lr_decay_epochs = [int(l) for l in FLAGS.lr_decay_epochs] lr_schedule = utils.LearningRateSchedule( steps_per_epoch, base_lr, decay_ratio=FLAGS.lr_decay_ratio, decay_epochs=lr_decay_epochs, warmup_epochs=FLAGS.lr_warmup_epochs) optimizer = tf.keras.optimizers.SGD(lr_schedule, momentum=0.9, nesterov=True) # tuner used for optimizing lambda_parameters tuner = tf.keras.optimizers.Adam(FLAGS.lr_tuning) metrics = { 'train/negative_log_likelihood': tf.keras.metrics.Mean(), 'train/accuracy': tf.keras.metrics.SparseCategoricalAccuracy(), 'train/loss': tf.keras.metrics.Mean(), 'train/ece': um.ExpectedCalibrationError(num_bins=FLAGS.num_bins), 'train/disagreement': tf.keras.metrics.Mean(), 'train/average_kl': tf.keras.metrics.Mean(), 'train/cosine_similarity': tf.keras.metrics.Mean(), 'test/negative_log_likelihood': tf.keras.metrics.Mean(), 'test/accuracy': tf.keras.metrics.SparseCategoricalAccuracy(), 'test/ece': um.ExpectedCalibrationError(num_bins=FLAGS.num_bins), 'test/gibbs_nll': tf.keras.metrics.Mean(), 'test/gibbs_accuracy': tf.keras.metrics.SparseCategoricalAccuracy(), 'test/disagreement': tf.keras.metrics.Mean(), 'test/average_kl': tf.keras.metrics.Mean(), 'test/cosine_similarity': tf.keras.metrics.Mean(), 'validation/loss': tf.keras.metrics.Mean(), 'validation/loss_entropy': tf.keras.metrics.Mean(), 'validation/loss_ce': tf.keras.metrics.Mean() } corrupt_metrics = {} for i in range(FLAGS.ensemble_size): metrics['test/nll_member_{}'.format(i)] = tf.keras.metrics.Mean() metrics['test/accuracy_member_{}'.format(i)] = ( tf.keras.metrics.SparseCategoricalAccuracy()) if FLAGS.corruptions_interval > 0: for intensity in range(1, max_intensity + 1): for corruption in corruption_types: dataset_name = '{0}_{1}'.format(corruption, intensity) corrupt_metrics['test/nll_{}'.format(dataset_name)] = ( tf.keras.metrics.Mean()) corrupt_metrics['test/accuracy_{}'.format( dataset_name)] = ( tf.keras.metrics.SparseCategoricalAccuracy()) corrupt_metrics['test/ece_{}'.format(dataset_name)] = ( um.ExpectedCalibrationError(num_bins=FLAGS.num_bins)) checkpoint = tf.train.Checkpoint(model=model, lambda_parameters=lambda_parameters, optimizer=optimizer) latest_checkpoint = tf.train.latest_checkpoint(FLAGS.output_dir) initial_epoch = 0 if latest_checkpoint and FLAGS.restore_checkpoint: # checkpoint.restore must be within a strategy.scope() so that optimizer # slot variables are mirrored. 
checkpoint.restore(latest_checkpoint) logging.info('Loaded checkpoint %s', latest_checkpoint) initial_epoch = optimizer.iterations.numpy() // steps_per_epoch @tf.function def train_step(iterator): """Training StepFn.""" def step_fn(inputs): """Per-Replica StepFn.""" images, labels = inputs images = tf.tile(images, [FLAGS.ensemble_size, 1, 1, 1]) # generate lambdas lambdas = log_uniform_sample(per_core_batch_size, lambda_parameters) lambdas = tf.reshape(lambdas, (FLAGS.ensemble_size * per_core_batch_size, lambdas_config.dim)) with tf.GradientTape() as tape: logits = model([images, lambdas], training=True) if FLAGS.use_bfloat16: logits = tf.cast(logits, tf.float32) if FLAGS.use_gibbs_ce: # Average of single model CEs # tiling of labels should be only done for Gibbs CE loss labels = tf.tile(labels, [FLAGS.ensemble_size]) negative_log_likelihood = tf.reduce_mean( tf.keras.losses.sparse_categorical_crossentropy( labels, logits, from_logits=True)) else: # Ensemble CE uses no tiling of the labels negative_log_likelihood = ensemble_crossentropy( labels, logits, FLAGS.ensemble_size) # Note: Divide l2_loss by sample_size (this differs from uncertainty_ # baselines implementation.) l2_loss = sum(model.losses) / train_sample_size loss = negative_log_likelihood + l2_loss # Scale the loss given the TPUStrategy will reduce sum all gradients. scaled_loss = loss / strategy.num_replicas_in_sync grads = tape.gradient(scaled_loss, model.trainable_variables) # Separate learning rate for fast weights. grads_and_vars = [] for grad, var in zip(grads, model.trainable_variables): if (('alpha' in var.name or 'gamma' in var.name) and 'batch_norm' not in var.name): grads_and_vars.append( (grad * FLAGS.fast_weight_lr_multiplier, var)) else: grads_and_vars.append((grad, var)) optimizer.apply_gradients(grads_and_vars) probs = tf.nn.softmax(logits) per_probs = tf.split(probs, num_or_size_splits=FLAGS.ensemble_size, axis=0) per_probs_stacked = tf.stack(per_probs, axis=0) metrics['train/ece'].update_state(labels, probs) metrics['train/loss'].update_state(loss) metrics['train/negative_log_likelihood'].update_state( negative_log_likelihood) metrics['train/accuracy'].update_state(labels, logits) diversity_results = um.average_pairwise_diversity( per_probs_stacked, FLAGS.ensemble_size) for k, v in diversity_results.items(): metrics['train/' + k].update_state(v) if grads_and_vars: grads, _ = zip(*grads_and_vars) strategy.run(step_fn, args=(next(iterator), )) @tf.function def tuning_step(iterator): """Tuning StepFn.""" def step_fn(inputs): """Per-Replica StepFn.""" images, labels = inputs images = tf.tile(images, [FLAGS.ensemble_size, 1, 1, 1]) with tf.GradientTape(watch_accessed_variables=False) as tape: tape.watch(lambda_parameters) # sample lambdas if FLAGS.sample_and_tune: lambdas = log_uniform_sample(per_core_batch_size, lambda_parameters) else: lambdas = log_uniform_mean(lambda_parameters) lambdas = tf.repeat(lambdas, per_core_batch_size, axis=0) lambdas = tf.reshape(lambdas, (FLAGS.ensemble_size * per_core_batch_size, lambdas_config.dim)) # ensemble CE logits = model([images, lambdas], training=False) ce = ensemble_crossentropy(labels, logits, FLAGS.ensemble_size) # entropy penalty for lambda distribution entropy = FLAGS.tau * log_uniform_entropy(lambda_parameters) loss = ce - entropy scaled_loss = loss / strategy.num_replicas_in_sync gradients = tape.gradient(loss, lambda_parameters) tuner.apply_gradients(zip(gradients, lambda_parameters)) metrics['validation/loss_ce'].update_state( ce / strategy.num_replicas_in_sync) 
metrics['validation/loss_entropy'].update_state( entropy / strategy.num_replicas_in_sync) metrics['validation/loss'].update_state(scaled_loss) strategy.run(step_fn, args=(next(iterator), )) @tf.function def test_step(iterator, dataset_name): """Evaluation StepFn.""" def step_fn(inputs): """Per-Replica StepFn.""" # Note that we don't use tf.tile for labels here images, labels = inputs images = tf.tile(images, [FLAGS.ensemble_size, 1, 1, 1]) # get lambdas lambdas = log_uniform_mean(lambda_parameters) rep_lambdas = tf.repeat(lambdas, per_core_batch_size, axis=0) # eval on testsets logits = model([images, rep_lambdas], training=False) if FLAGS.use_bfloat16: logits = tf.cast(logits, tf.float32) probs = tf.nn.softmax(logits) per_probs = tf.split(probs, num_or_size_splits=FLAGS.ensemble_size, axis=0) # per member performance and gibbs performance (average per member perf) if dataset_name == 'clean': for i in range(FLAGS.ensemble_size): member_probs = per_probs[i] member_loss = tf.keras.losses.sparse_categorical_crossentropy( labels, member_probs) metrics['test/nll_member_{}'.format(i)].update_state( member_loss) metrics['test/accuracy_member_{}'.format(i)].update_state( labels, member_probs) labels_tile = tf.tile(labels, [FLAGS.ensemble_size]) metrics['test/gibbs_nll'].update_state( tf.reduce_mean( tf.keras.losses.sparse_categorical_crossentropy( labels_tile, logits, from_logits=True))) metrics['test/gibbs_accuracy'].update_state(labels_tile, probs) # ensemble performance negative_log_likelihood = ensemble_crossentropy( labels, logits, FLAGS.ensemble_size) probs = tf.reduce_mean(per_probs, axis=0) if dataset_name == 'clean': metrics['test/negative_log_likelihood'].update_state( negative_log_likelihood) metrics['test/accuracy'].update_state(labels, probs) metrics['test/ece'].update_state(labels, probs) else: corrupt_metrics['test/nll_{}'.format( dataset_name)].update_state(negative_log_likelihood) corrupt_metrics['test/accuracy_{}'.format( dataset_name)].update_state(labels, probs) corrupt_metrics['test/ece_{}'.format( dataset_name)].update_state(labels, probs) if dataset_name == 'clean': per_probs_stacked = tf.stack(per_probs, axis=0) diversity_results = um.average_pairwise_diversity( per_probs_stacked, FLAGS.ensemble_size) for k, v in diversity_results.items(): metrics['test/' + k].update_state(v) strategy.run(step_fn, args=(next(iterator), )) logging.info('--- Starting training using %d examples. ---', train_sample_size) train_iterator = iter(train_dataset) validation_iterator = iter(validation_dataset) start_time = time.time() for epoch in range(initial_epoch, FLAGS.train_epochs): logging.info('Starting to run epoch: %s', epoch) for step in range(steps_per_epoch): train_step(train_iterator) do_tuning = (epoch >= FLAGS.tuning_warmup_epochs) if do_tuning and ((step + 1) % FLAGS.tuning_every_x_step == 0): tuning_step(validation_iterator) # clip lambda parameters if outside of range clip_lambda_parameters(lambda_parameters, lambdas_config) current_step = epoch * steps_per_epoch + (step + 1) max_steps = steps_per_epoch * FLAGS.train_epochs time_elapsed = time.time() - start_time steps_per_sec = float(current_step) / time_elapsed eta_seconds = (max_steps - current_step) / steps_per_sec message = ('{:.1%} completion: epoch {:d}/{:d}. {:.1f} steps/s. ' 'ETA: {:.0f} min. 
Time elapsed: {:.0f} min'.format( current_step / max_steps, epoch + 1, FLAGS.train_epochs, steps_per_sec, eta_seconds / 60, time_elapsed / 60)) if step % 20 == 0: logging.info(message) # evaluate on test data datasets_to_evaluate = {'clean': test_datasets['clean']} if (FLAGS.corruptions_interval > 0 and (epoch + 1) % FLAGS.corruptions_interval == 0): datasets_to_evaluate = test_datasets for dataset_name, test_dataset in datasets_to_evaluate.items(): test_iterator = iter(test_dataset) logging.info('Testing on dataset %s', dataset_name) for step in range(steps_per_eval): if step % 20 == 0: logging.info('Starting to run eval step %s of epoch: %s', step, epoch) test_step(test_iterator, dataset_name) logging.info('Done with testing on %s', dataset_name) corrupt_results = {} if (FLAGS.corruptions_interval > 0 and (epoch + 1) % FLAGS.corruptions_interval == 0): corrupt_results = utils.aggregate_corrupt_metrics( corrupt_metrics, corruption_types, max_intensity) logging.info('Train Loss: %.4f, Accuracy: %.2f%%', metrics['train/loss'].result(), metrics['train/accuracy'].result() * 100) logging.info('Validation Loss: %.4f, CE: %.4f, Entropy: %.4f', metrics['validation/loss'].result(), metrics['validation/loss_ce'].result(), metrics['validation/loss_entropy'].result()) logging.info('Test NLL: %.4f, Accuracy: %.2f%%', metrics['test/negative_log_likelihood'].result(), metrics['test/accuracy'].result() * 100) for i in range(FLAGS.ensemble_size): logging.info( 'Member %d Test Loss: %.4f, Accuracy: %.2f%%', i, metrics['test/nll_member_{}'.format(i)].result(), metrics['test/accuracy_member_{}'.format(i)].result() * 100) total_results = { name: metric.result() for name, metric in metrics.items() } total_results.update({ name: metric.result() for name, metric in corrupt_metrics.items() }) total_results.update(corrupt_results) with summary_writer.as_default(): for name, result in total_results.items(): tf.summary.scalar(name, result, step=epoch + 1) for metric in metrics.values(): metric.reset_states() # save checkpoint and lambdas config if (FLAGS.checkpoint_interval > 0 and (epoch + 1) % FLAGS.checkpoint_interval == 0): checkpoint_name = checkpoint.save( os.path.join(FLAGS.output_dir, 'checkpoint')) lambdas_cf = lambdas_config.get_config() filepath = os.path.join(FLAGS.output_dir, 'lambdas_config.p') with tf.io.gfile.GFile(filepath, 'wb') as fp: pickle.dump(lambdas_cf, fp, protocol=pickle.HIGHEST_PROTOCOL) logging.info('Saved checkpoint to %s', checkpoint_name)
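# --- Illustrative sketch (not part of the scripts in this file) ---
# The hyper-batchensemble above tunes per-member L2 hyperparameters by
# sampling them log-uniformly between learned bounds (log_lower, log_upper).
# The helper below sketches what such a log_uniform_sample could look like;
# the real function's signature and shapes are assumptions here, modeled on
# the [ensemble_size, dim] bound variables used above.

import tensorflow as tf


def log_uniform_sample_sketch(num_samples, log_lower, log_upper):
  """Draws lambdas with log(lambda) ~ Uniform(log_lower, log_upper)."""
  ens_size, dim = log_lower.shape
  u = tf.random.uniform((ens_size, num_samples, dim))
  log_lambdas = log_lower[:, None, :] + u * (
      log_upper[:, None, :] - log_lower[:, None, :])
  return tf.exp(log_lambdas)  # [ensemble_size, num_samples, dim]


# Two L2 ranges for a single ensemble member:
# log_uniform_sample_sketch(
#     4,
#     tf.math.log(tf.constant([[1e-4, 1e-3]])),
#     tf.math.log(tf.constant([[1e-1, 1.0]]))).shape -> (1, 4, 2)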
def main(argv): del argv # unused arg tf.io.gfile.makedirs(FLAGS.output_dir) logging.info('Saving checkpoints at %s', FLAGS.output_dir) tf.random.set_seed(FLAGS.seed) if FLAGS.use_gpu: logging.info('Use GPU') strategy = tf.distribute.MirroredStrategy() else: logging.info('Use TPU at %s', FLAGS.tpu if FLAGS.tpu is not None else 'local') resolver = tf.distribute.cluster_resolver.TPUClusterResolver(tpu=FLAGS.tpu) tf.config.experimental_connect_to_cluster(resolver) tf.tpu.experimental.initialize_tpu_system(resolver) strategy = tf.distribute.TPUStrategy(resolver) ds_info = tfds.builder(FLAGS.dataset).info batch_size = FLAGS.per_core_batch_size * FLAGS.num_cores test_batch_size = FLAGS.per_core_batch_size * FLAGS.num_cores steps_per_epoch = ds_info.splits['train'].num_examples // batch_size steps_per_eval = ds_info.splits['test'].num_examples // test_batch_size num_classes = ds_info.features['label'].num_classes train_dataset = utils.load_dataset( split=tfds.Split.TRAIN, name=FLAGS.dataset, batch_size=batch_size, use_bfloat16=FLAGS.use_bfloat16) clean_test_dataset = utils.load_dataset( split=tfds.Split.TEST, name=FLAGS.dataset, batch_size=test_batch_size, use_bfloat16=FLAGS.use_bfloat16) train_dataset = strategy.experimental_distribute_dataset(train_dataset) test_datasets = { 'clean': strategy.experimental_distribute_dataset(clean_test_dataset), } if FLAGS.corruptions_interval > 0: if FLAGS.dataset == 'cifar10': load_c_dataset = utils.load_cifar10_c else: load_c_dataset = functools.partial( utils.load_cifar100_c, path=FLAGS.cifar100_c_path) corruption_types, max_intensity = utils.load_corrupted_test_info( FLAGS.dataset) for corruption in corruption_types: for intensity in range(1, max_intensity + 1): dataset = load_c_dataset( corruption_name=corruption, corruption_intensity=intensity, batch_size=test_batch_size, use_bfloat16=FLAGS.use_bfloat16) test_datasets['{0}_{1}'.format(corruption, intensity)] = ( strategy.experimental_distribute_dataset(dataset)) if FLAGS.use_bfloat16: policy = tf.keras.mixed_precision.experimental.Policy('mixed_bfloat16') tf.keras.mixed_precision.experimental.set_policy(policy) summary_writer = tf.summary.create_file_writer( os.path.join(FLAGS.output_dir, 'summaries')) with strategy.scope(): logging.info('Building ResNet model') model = ub.models.wide_resnet_condconv( input_shape=ds_info.features['image'].shape, depth=28, width_multiplier=FLAGS.resnet_width_multiplier, num_classes=num_classes, num_experts=FLAGS.num_experts, per_core_batch_size=FLAGS.per_core_batch_size, use_cond_dense=FLAGS.use_cond_dense, reduce_dense_outputs=FLAGS.reduce_dense_outputs, cond_placement=FLAGS.cond_placement, routing_fn=FLAGS.routing_fn, normalize_routing=FLAGS.normalize_routing, normalize_dense_routing=FLAGS.normalize_dense_routing, top_k=FLAGS.top_k, routing_pooling=FLAGS.routing_pooling, l2=FLAGS.l2) # reuse_routing=FLAGS.reuse_routing, # shared_routing_type=FLAGS.shared_routing_type) logging.info('Model input shape: %s', model.input_shape) logging.info('Model output shape: %s', model.output_shape) logging.info('Model number of weights: %s', model.count_params()) # Linearly scale learning rate and the decay epochs by vanilla settings. 
base_lr = FLAGS.base_learning_rate * batch_size / 128 lr_decay_epochs = [(int(start_epoch_str) * FLAGS.train_epochs) // 200 for start_epoch_str in FLAGS.lr_decay_epochs] lr_schedule = utils.LearningRateSchedule( steps_per_epoch, base_lr, decay_ratio=FLAGS.lr_decay_ratio, decay_epochs=lr_decay_epochs, warmup_epochs=FLAGS.lr_warmup_epochs) optimizer = tf.keras.optimizers.SGD( lr_schedule, momentum=0.9, nesterov=True) metrics = { 'train/negative_log_likelihood': tf.keras.metrics.Mean(), 'train/accuracy': tf.keras.metrics.SparseCategoricalAccuracy(), 'train/loss': tf.keras.metrics.Mean(), 'train/ece': um.ExpectedCalibrationError(num_bins=FLAGS.num_bins), 'test/negative_log_likelihood': tf.keras.metrics.Mean(), 'test/accuracy': tf.keras.metrics.SparseCategoricalAccuracy(), 'test/ece': um.ExpectedCalibrationError(num_bins=FLAGS.num_bins), } if not FLAGS.reduce_dense_outputs and FLAGS.use_cond_dense: metrics.update({ 'test/nll_poe': tf.keras.metrics.Mean(), 'test/nll_moe': tf.keras.metrics.Mean(), 'test/nll_unweighted_poe': tf.keras.metrics.Mean(), 'test/nll_unweighted_moe': tf.keras.metrics.Mean(), 'test/unweighted_gibbs_ce': tf.keras.metrics.Mean(), 'test/ece_unweighted_moe': um.ExpectedCalibrationError(num_bins=FLAGS.num_bins), 'test/accuracy_unweighted_moe': tf.keras.metrics.SparseCategoricalAccuracy(), 'test/ece_poe': um.ExpectedCalibrationError(num_bins=FLAGS.num_bins), 'test/accuracy_poe': tf.keras.metrics.SparseCategoricalAccuracy(), 'test/ece_unweighted_poe': um.ExpectedCalibrationError(num_bins=FLAGS.num_bins), 'test/accuracy_unweighted_poe': tf.keras.metrics.SparseCategoricalAccuracy(), }) for idx in range(FLAGS.num_experts): metrics['test/dense_routing_weight_{}'.format( idx)] = tf.keras.metrics.Mean() metrics['test/dense_routing_weight_normalized_{}'.format( idx)] = tf.keras.metrics.Mean() if FLAGS.corruptions_interval > 0: corrupt_metrics = {} for intensity in range(1, max_intensity + 1): for corruption in corruption_types: dataset_name = '{0}_{1}'.format(corruption, intensity) corrupt_metrics['test/nll_{}'.format(dataset_name)] = ( tf.keras.metrics.Mean()) corrupt_metrics['test/accuracy_{}'.format(dataset_name)] = ( tf.keras.metrics.SparseCategoricalAccuracy()) corrupt_metrics['test/ece_{}'.format(dataset_name)] = ( um.ExpectedCalibrationError(num_bins=FLAGS.num_bins)) corrupt_metrics['test/nll_weighted_moe_{}'.format(dataset_name)] = ( tf.keras.metrics.Mean()) corrupt_metrics['test/accuracy_weighted_moe_{}'.format( dataset_name)] = ( tf.keras.metrics.SparseCategoricalAccuracy()) corrupt_metrics['test/ece_weighted_moe_{}'.format(dataset_name)] = ( um.ExpectedCalibrationError(num_bins=FLAGS.num_bins)) checkpoint = tf.train.Checkpoint(model=model, optimizer=optimizer) latest_checkpoint = tf.train.latest_checkpoint(FLAGS.output_dir) initial_epoch = 0 if latest_checkpoint: # checkpoint.restore must be within a strategy.scope() so that optimizer # slot variables are mirrored. 
checkpoint.restore(latest_checkpoint) logging.info('Loaded checkpoint %s', latest_checkpoint) initial_epoch = optimizer.iterations.numpy() // steps_per_epoch def _process_3d_logits(logits, routing_weights, labels): routing_weights_3d = tf.expand_dims(routing_weights, axis=-1) weighted_logits = tf.math.reduce_mean(routing_weights_3d * logits, axis=1) unweighted_logits = tf.math.reduce_mean(logits, axis=1) probs = tf.nn.softmax(logits) unweighted_probs = tf.math.reduce_mean(probs, axis=1) weighted_probs = tf.math.reduce_sum(routing_weights_3d * probs, axis=1) labels_broadcasted = tf.tile( tf.reshape(labels, (-1, 1)), (1, FLAGS.num_experts)) neg_log_likelihoods = tf.keras.losses.sparse_categorical_crossentropy( labels_broadcasted, logits, from_logits=True) unweighted_gibbs_ce = tf.math.reduce_mean(neg_log_likelihoods) weighted_gibbs_ce = tf.math.reduce_mean( tf.math.reduce_sum(routing_weights * neg_log_likelihoods, axis=1)) return { 'weighted_logits': weighted_logits, 'unweighted_logits': unweighted_logits, 'unweighted_probs': unweighted_probs, 'weighted_probs': weighted_probs, 'neg_log_likelihoods': neg_log_likelihoods, 'unweighted_gibbs_ce': unweighted_gibbs_ce, 'weighted_gibbs_ce': weighted_gibbs_ce } def _process_3d_logits_train(logits, routing_weights, labels): processing_results = _process_3d_logits(logits, routing_weights, labels) if FLAGS.loss == 'gibbs_ce': probs = processing_results['weighted_probs'] negative_log_likelihood = processing_results['weighted_gibbs_ce'] elif FLAGS.loss == 'unweighted_gibbs_ce': probs = processing_results['unweighted_probs'] negative_log_likelihood = processing_results['unweighted_gibbs_ce'] elif FLAGS.loss == 'moe': probs = processing_results['weighted_probs'] negative_log_likelihood = tf.math.reduce_mean( tf.keras.losses.sparse_categorical_crossentropy( labels, probs, from_logits=False)) elif FLAGS.loss == 'unweighted_moe': probs = processing_results['unweighted_probs'] negative_log_likelihood = tf.math.reduce_mean( tf.keras.losses.sparse_categorical_crossentropy( labels, probs, from_logits=False)) elif FLAGS.loss == 'poe': probs = tf.nn.softmax(processing_results['weighted_logits']) negative_log_likelihood = tf.math.reduce_mean( tf.keras.losses.sparse_categorical_crossentropy( labels, processing_results['weighted_logits'], from_logits=True)) elif FLAGS.loss == 'unweighted_poe': probs = tf.nn.softmax(processing_results['unweighted_logits']) negative_log_likelihood = tf.math.reduce_mean( tf.keras.losses.sparse_categorical_crossentropy( labels, processing_results['unweighted_logits'], from_logits=True)) return probs, negative_log_likelihood def _process_3d_logits_test(routing_weights, logits, labels): processing_results = _process_3d_logits(logits, routing_weights, labels) nll_poe = tf.math.reduce_mean( tf.keras.losses.sparse_categorical_crossentropy( labels, processing_results['weighted_logits'], from_logits=True)) nll_unweighted_poe = tf.math.reduce_mean( tf.keras.losses.sparse_categorical_crossentropy( labels, processing_results['unweighted_logits'], from_logits=True)) nll_moe = tf.math.reduce_mean( tf.keras.losses.sparse_categorical_crossentropy( labels, processing_results['weighted_probs'], from_logits=False)) nll_unweighted_moe = tf.math.reduce_mean( tf.keras.losses.sparse_categorical_crossentropy( labels, processing_results['unweighted_probs'], from_logits=False)) return { 'nll_poe': nll_poe, 'nll_moe': nll_moe, 'nll_unweighted_poe': nll_unweighted_poe, 'nll_unweighted_moe': nll_unweighted_moe, 'unweighted_gibbs_ce':
processing_results['unweighted_gibbs_ce'], 'weighted_gibbs_ce': processing_results['weighted_gibbs_ce'], 'weighted_probs': processing_results['weighted_probs'], 'unweighted_probs': processing_results['unweighted_probs'], 'weighted_logits': processing_results['weighted_logits'], 'unweighted_logits': processing_results['unweighted_logits'] } @tf.function def train_step(iterator): """Training StepFn.""" def step_fn(inputs): """Per-Replica StepFn.""" images, labels = inputs with tf.GradientTape() as tape: logits = model(images, training=True) if FLAGS.use_bfloat16: logits = tf.cast(logits, tf.float32) # if not FLAGS.reduce_dense_outputs and FLAGS.use_cond_dense: if not isinstance(logits, tuple): raise ValueError('Logits are not a tuple.') # logits is a `Tensor` of shape [batch_size, num_experts, num_classes] logits, all_routing_weights = logits # routing_weights is a `Tensor` of shape [batch_size, num_experts] routing_weights = all_routing_weights[-1] if not FLAGS.reduce_dense_outputs and FLAGS.use_cond_dense: probs, negative_log_likelihood = _process_3d_logits_train( logits, routing_weights, labels) else: probs = tf.nn.softmax(logits) # Prior to reduce_mean the NLLs are of the shape [batch, num_experts]. negative_log_likelihood = tf.reduce_mean( tf.keras.losses.sparse_categorical_crossentropy( labels, logits, from_logits=True)) l2_loss = sum(model.losses) loss = negative_log_likelihood + l2_loss # Scale the loss given the TPUStrategy will reduce sum all gradients. scaled_loss = loss / strategy.num_replicas_in_sync grads = tape.gradient(scaled_loss, model.trainable_variables) optimizer.apply_gradients(zip(grads, model.trainable_variables)) metrics['train/ece'].update_state(labels, probs) metrics['train/loss'].update_state(loss) metrics['train/negative_log_likelihood'].update_state( negative_log_likelihood) metrics['train/accuracy'].update_state(labels, probs) strategy.run(step_fn, args=(next(iterator),)) @tf.function def test_step(iterator, dataset_name): """Evaluation StepFn.""" def step_fn(inputs): """Per-Replica StepFn.""" images, labels = inputs logits = model(images, training=False) if FLAGS.use_bfloat16: logits = tf.cast(logits, tf.float32) if not isinstance(logits, tuple): raise ValueError('Logits not a tuple') # logits is a `Tensor` of shape [batch_size, num_experts, num_classes] # routing_weights is a `Tensor` of shape [batch_size, num_experts] logits, all_routing_weights = logits routing_weights = all_routing_weights[-1] if not FLAGS.reduce_dense_outputs and FLAGS.use_cond_dense: results = _process_3d_logits_test(routing_weights, logits, labels) else: probs = tf.nn.softmax(logits) negative_log_likelihood = tf.reduce_mean( tf.keras.losses.sparse_categorical_crossentropy(labels, probs)) if dataset_name == 'clean': if not FLAGS.reduce_dense_outputs and FLAGS.use_cond_dense: metrics['test/nll_poe'].update_state(results['nll_poe']) metrics['test/nll_moe'].update_state(results['nll_moe']) metrics['test/nll_unweighted_poe'].update_state( results['nll_unweighted_poe']) metrics['test/nll_unweighted_moe'].update_state( results['nll_unweighted_moe']) metrics['test/unweighted_gibbs_ce'].update_state( results['unweighted_gibbs_ce']) metrics['test/negative_log_likelihood'].update_state( results['weighted_gibbs_ce']) metrics['test/ece'].update_state(labels, results['weighted_probs']) metrics['test/accuracy'].update_state(labels, results['weighted_probs']) metrics['test/ece_unweighted_moe'].update_state( labels, results['unweighted_probs']) metrics['test/accuracy_unweighted_moe'].update_state( 
labels, results['unweighted_probs']) metrics['test/ece_poe'].update_state(labels, results['weighted_logits']) metrics['test/accuracy_poe'].update_state(labels, results['weighted_logits']) metrics['test/ece_unweighted_poe'].update_state( labels, results['unweighted_logits']) metrics['test/accuracy_unweighted_poe'].update_state( labels, results['unweighted_logits']) # TODO(ghassen): summarize all routing weights not only last layer's. average_routing_weights = tf.math.reduce_mean(routing_weights, axis=0) routing_weights_sum = tf.math.reduce_sum(average_routing_weights) for idx in range(FLAGS.num_experts): metrics['test/dense_routing_weight_{}'.format(idx)].update_state( average_routing_weights[idx]) metrics['test/dense_routing_weight_normalized_{}'.format( idx)].update_state(average_routing_weights[idx] / routing_weights_sum) # TODO(ghassen): add more metrics for expert utilization, # load loss and importance/balance loss. else: metrics['test/negative_log_likelihood'].update_state( negative_log_likelihood) metrics['test/accuracy'].update_state(labels, probs) metrics['test/ece'].update_state(labels, probs) else: # TODO(ghassen): figure out how to aggregate probs for the OOD case. if not FLAGS.reduce_dense_outputs and FLAGS.use_cond_dense: corrupt_metrics['test/nll_{}'.format(dataset_name)].update_state( results['unweighted_gibbs_ce']) corrupt_metrics['test/accuracy_{}'.format(dataset_name)].update_state( labels, results['unweighted_probs']) corrupt_metrics['test/ece_{}'.format(dataset_name)].update_state( labels, results['unweighted_probs']) corrupt_metrics['test/nll_weighted_moe_{}'.format( dataset_name)].update_state(results['weighted_gibbs_ce']) corrupt_metrics['test/accuracy_weighted_moe_{}'.format( dataset_name)].update_state(labels, results['weighted_probs']) corrupt_metrics['test/ece_weighted_moe_{}'.format( dataset_name)].update_state(labels, results['weighted_probs']) else: corrupt_metrics['test/nll_{}'.format(dataset_name)].update_state( negative_log_likelihood) corrupt_metrics['test/accuracy_{}'.format(dataset_name)].update_state( labels, probs) corrupt_metrics['test/ece_{}'.format(dataset_name)].update_state( labels, probs) strategy.run(step_fn, args=(next(iterator),)) metrics.update({'test/ms_per_example': tf.keras.metrics.Mean()}) train_iterator = iter(train_dataset) start_time = time.time() for epoch in range(initial_epoch, FLAGS.train_epochs): logging.info('Starting to run epoch: %s', epoch) for step in range(steps_per_epoch): train_step(train_iterator) current_step = epoch * steps_per_epoch + (step + 1) max_steps = steps_per_epoch * FLAGS.train_epochs time_elapsed = time.time() - start_time steps_per_sec = float(current_step) / time_elapsed eta_seconds = (max_steps - current_step) / steps_per_sec message = ('{:.1%} completion: epoch {:d}/{:d}. {:.1f} steps/s. ' 'ETA: {:.0f} min. 
Time elapsed: {:.0f} min'.format( current_step / max_steps, epoch + 1, FLAGS.train_epochs, steps_per_sec, eta_seconds / 60, time_elapsed / 60)) if step % 20 == 0: logging.info(message) datasets_to_evaluate = {'clean': test_datasets['clean']} if (FLAGS.corruptions_interval > 0 and (epoch + 1) % FLAGS.corruptions_interval == 0): datasets_to_evaluate = test_datasets for dataset_name, test_dataset in datasets_to_evaluate.items(): test_iterator = iter(test_dataset) logging.info('Testing on dataset %s', dataset_name) for step in range(steps_per_eval): if step % 20 == 0: logging.info('Starting to run eval step %s of epoch: %s', step, epoch) test_start_time = time.time() test_step(test_iterator, dataset_name) ms_per_example = (time.time() - test_start_time) * 1e6 / batch_size metrics['test/ms_per_example'].update_state(ms_per_example) logging.info('Done with testing on %s', dataset_name) corrupt_results = {} if (FLAGS.corruptions_interval > 0 and (epoch + 1) % FLAGS.corruptions_interval == 0): corrupt_results = utils.aggregate_corrupt_metrics(corrupt_metrics, corruption_types, max_intensity) logging.info('Train Loss: %.4f, Accuracy: %.2f%%', metrics['train/loss'].result(), metrics['train/accuracy'].result() * 100) logging.info('Test NLL: %.4f, Accuracy: %.2f%%', metrics['test/negative_log_likelihood'].result(), metrics['test/accuracy'].result() * 100) total_results = {name: metric.result() for name, metric in metrics.items()} total_results.update(corrupt_results) with summary_writer.as_default(): for name, result in total_results.items(): tf.summary.scalar(name, result, step=epoch + 1) for metric in metrics.values(): metric.reset_states() if (FLAGS.checkpoint_interval > 0 and (epoch + 1) % FLAGS.checkpoint_interval == 0): checkpoint_name = checkpoint.save( os.path.join(FLAGS.output_dir, 'checkpoint')) logging.info('Saved checkpoint to %s', checkpoint_name) final_checkpoint_name = checkpoint.save( os.path.join(FLAGS.output_dir, 'checkpoint')) logging.info('Saved last checkpoint to %s', final_checkpoint_name)
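# A minimal sketch of the two expert-combination rules implemented by
# _process_3d_logits above: a mixture of experts (MoE) averages expert
# softmaxes under the routing weights, while a product of experts (PoE)
# combines experts in logit space, here via the same routing-weighted
# reduce_mean the script uses. Shapes and values below are illustrative toys.
import tensorflow as tf

batch_size, num_experts, num_classes = 2, 3, 5
logits = tf.random.normal([batch_size, num_experts, num_classes])
routing_weights = tf.nn.softmax(tf.random.normal([batch_size, num_experts]))

routing_weights_3d = tf.expand_dims(routing_weights, axis=-1)
probs = tf.nn.softmax(logits)
# MoE: convex combination of the expert softmaxes.
moe_probs = tf.math.reduce_sum(routing_weights_3d * probs, axis=1)
# PoE: combine in logit space, then normalize once with a single softmax.
poe_logits = tf.math.reduce_mean(routing_weights_3d * logits, axis=1)
poe_probs = tf.nn.softmax(poe_logits)
# Both combinations are proper distributions over classes.
tf.debugging.assert_near(
    tf.reduce_sum(moe_probs, axis=-1), tf.ones([batch_size]), atol=1e-5)
tf.debugging.assert_near(
    tf.reduce_sum(poe_probs, axis=-1), tf.ones([batch_size]), atol=1e-5)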
def main(argv): del argv # unused arg tf.io.gfile.makedirs(FLAGS.output_dir) logging.info('Saving checkpoints at %s', FLAGS.output_dir) tf.random.set_seed(FLAGS.seed) train_batch_size = (FLAGS.per_core_batch_size * FLAGS.num_cores // FLAGS.batch_repetitions) test_batch_size = FLAGS.per_core_batch_size * FLAGS.num_cores steps_per_epoch = APPROX_IMAGENET_TRAIN_IMAGES // train_batch_size steps_per_eval = IMAGENET_VALIDATION_IMAGES // test_batch_size if FLAGS.use_gpu: logging.info('Use GPU') strategy = tf.distribute.MirroredStrategy() else: logging.info('Use TPU at %s', FLAGS.tpu if FLAGS.tpu is not None else 'local') resolver = tf.distribute.cluster_resolver.TPUClusterResolver( tpu=FLAGS.tpu) tf.config.experimental_connect_to_cluster(resolver) tf.tpu.experimental.initialize_tpu_system(resolver) strategy = tf.distribute.TPUStrategy(resolver) builder = utils.ImageNetInput(data_dir=FLAGS.data_dir, use_bfloat16=FLAGS.use_bfloat16) train_dataset = builder.as_dataset(split=tfds.Split.TRAIN, batch_size=train_batch_size) test_dataset = builder.as_dataset(split=tfds.Split.TEST, batch_size=test_batch_size) train_dataset = strategy.experimental_distribute_dataset(train_dataset) test_dataset = strategy.experimental_distribute_dataset(test_dataset) if FLAGS.use_bfloat16: policy = tf.keras.mixed_precision.experimental.Policy('mixed_bfloat16') tf.keras.mixed_precision.experimental.set_policy(policy) with strategy.scope(): logging.info('Building Keras ResNet-50 model') model = ub.models.resnet50_mimo( input_shape=(FLAGS.ensemble_size, 224, 224, 3), num_classes=NUM_CLASSES, ensemble_size=FLAGS.ensemble_size, width_multiplier=FLAGS.width_multiplier) logging.info('Model input shape: %s', model.input_shape) logging.info('Model output shape: %s', model.output_shape) logging.info('Model number of weights: %s', model.count_params()) # Scale learning rate and decay epochs by vanilla settings. base_lr = FLAGS.base_learning_rate * train_batch_size / 256 lr_schedule = [ # (multiplier, epoch to start) tuples (1.0, FLAGS.lr_warmup_epochs), (0.1, int(FLAGS.lr_decay_epochs[0])), (0.01, int(FLAGS.lr_decay_epochs[1])), (0.001, int(FLAGS.lr_decay_epochs[2])) ] learning_rate = utils.LearningRateSchedule(steps_per_epoch, base_lr, FLAGS.train_epochs, lr_schedule) optimizer = tf.keras.optimizers.SGD(learning_rate=learning_rate, momentum=0.9, nesterov=True) metrics = { 'train/negative_log_likelihood': tf.keras.metrics.Mean(), 'train/accuracy': tf.keras.metrics.SparseCategoricalAccuracy(), 'train/loss': tf.keras.metrics.Mean(), 'train/ece': um.ExpectedCalibrationError(num_bins=FLAGS.num_bins), 'test/negative_log_likelihood': tf.keras.metrics.Mean(), 'test/accuracy': tf.keras.metrics.SparseCategoricalAccuracy(), 'test/ece': um.ExpectedCalibrationError(num_bins=FLAGS.num_bins), } for i in range(FLAGS.ensemble_size): metrics['test/nll_member_{}'.format(i)] = tf.keras.metrics.Mean() metrics['test/accuracy_member_{}'.format(i)] = ( tf.keras.metrics.SparseCategoricalAccuracy()) test_diversity = { 'test/disagreement': tf.keras.metrics.Mean(), 'test/average_kl': tf.keras.metrics.Mean(), 'test/cosine_similarity': tf.keras.metrics.Mean(), } logging.info('Finished building Keras ResNet-50 model') checkpoint = tf.train.Checkpoint(model=model, optimizer=optimizer) latest_checkpoint = tf.train.latest_checkpoint(FLAGS.output_dir) initial_epoch = 0 if latest_checkpoint: # checkpoint.restore must be within a strategy.scope() so that optimizer # slot variables are mirrored. 
checkpoint.restore(latest_checkpoint) logging.info('Loaded checkpoint %s', latest_checkpoint) initial_epoch = optimizer.iterations.numpy() // steps_per_epoch summary_writer = tf.summary.create_file_writer( os.path.join(FLAGS.output_dir, 'summaries')) @tf.function def train_step(iterator): """Training StepFn.""" def step_fn(inputs): """Per-Replica StepFn.""" images, labels = inputs batch_size = tf.shape(images)[0] main_shuffle = tf.random.shuffle( tf.tile(tf.range(batch_size), [FLAGS.batch_repetitions])) to_shuffle = tf.cast( tf.cast(tf.shape(main_shuffle)[0], tf.float32) * (1. - FLAGS.input_repetition_probability), tf.int32) shuffle_indices = [ tf.concat([ tf.random.shuffle(main_shuffle[:to_shuffle]), main_shuffle[to_shuffle:] ], axis=0) for _ in range(FLAGS.ensemble_size) ] images = tf.stack([ tf.gather(images, indices, axis=0) for indices in shuffle_indices ], axis=1) labels = tf.stack([ tf.gather(labels, indices, axis=0) for indices in shuffle_indices ], axis=1) with tf.GradientTape() as tape: logits = model(images, training=True) if FLAGS.use_bfloat16: logits = tf.cast(logits, tf.float32) negative_log_likelihood = tf.reduce_mean( tf.reduce_sum( tf.keras.losses.sparse_categorical_crossentropy( labels, logits, from_logits=True), axis=1)) filtered_variables = [] for var in model.trainable_variables: # Apply l2 on the weights. This excludes BN parameters and biases, but # pay caution to their naming scheme. if 'kernel' in var.name or 'bias' in var.name: filtered_variables.append(tf.reshape(var, (-1, ))) l2_loss = FLAGS.l2 * 2 * tf.nn.l2_loss( tf.concat(filtered_variables, axis=0)) # Scale the loss given the TPUStrategy will reduce sum all gradients. loss = negative_log_likelihood + l2_loss scaled_loss = loss / strategy.num_replicas_in_sync grads = tape.gradient(scaled_loss, model.trainable_variables) optimizer.apply_gradients(zip(grads, model.trainable_variables)) probs = tf.nn.softmax(tf.reshape(logits, [-1, NUM_CLASSES])) flat_labels = tf.reshape(labels, [-1]) metrics['train/ece'].update_state(flat_labels, probs) metrics['train/loss'].update_state(loss) metrics['train/negative_log_likelihood'].update_state( negative_log_likelihood) metrics['train/accuracy'].update_state(flat_labels, probs) strategy.run(step_fn, args=(next(iterator), )) @tf.function def test_step(iterator): """Evaluation StepFn.""" def step_fn(inputs): """Per-Replica StepFn.""" images, labels = inputs images = tf.tile(tf.expand_dims(images, 1), [1, FLAGS.ensemble_size, 1, 1, 1]) logits = model(images, training=False) if FLAGS.use_bfloat16: logits = tf.cast(logits, tf.float32) probs = tf.nn.softmax(logits) per_probs = tf.transpose(probs, perm=[1, 0, 2]) diversity_results = um.average_pairwise_diversity( per_probs, FLAGS.ensemble_size) for k, v in diversity_results.items(): test_diversity['test/' + k].update_state(v) for i in range(FLAGS.ensemble_size): member_probs = probs[:, i] member_loss = tf.keras.losses.sparse_categorical_crossentropy( labels, member_probs) metrics['test/nll_member_{}'.format(i)].update_state( member_loss) metrics['test/accuracy_member_{}'.format(i)].update_state( labels, member_probs) # Negative log marginal likelihood computed in a numerically-stable way. 
labels_tiled = tf.tile(tf.expand_dims(labels, 1), [1, FLAGS.ensemble_size]) log_likelihoods = -tf.keras.losses.sparse_categorical_crossentropy( labels_tiled, logits, from_logits=True) negative_log_likelihood = tf.reduce_mean( -tf.reduce_logsumexp(log_likelihoods, axis=[1]) + tf.math.log(float(FLAGS.ensemble_size))) probs = tf.math.reduce_mean(probs, axis=1) # marginalize metrics['test/negative_log_likelihood'].update_state( negative_log_likelihood) metrics['test/accuracy'].update_state(labels, probs) metrics['test/ece'].update_state(labels, probs) strategy.run(step_fn, args=(next(iterator), )) metrics.update({'test/ms_per_example': tf.keras.metrics.Mean()}) train_iterator = iter(train_dataset) start_time = time.time() for epoch in range(initial_epoch, FLAGS.train_epochs): logging.info('Starting to run epoch: %s', epoch) for step in range(steps_per_epoch): train_step(train_iterator) current_step = epoch * steps_per_epoch + (step + 1) max_steps = steps_per_epoch * FLAGS.train_epochs time_elapsed = time.time() - start_time steps_per_sec = float(current_step) / time_elapsed eta_seconds = (max_steps - current_step) / steps_per_sec message = ('{:.1%} completion: epoch {:d}/{:d}. {:.1f} steps/s. ' 'ETA: {:.0f} min. Time elapsed: {:.0f} min'.format( current_step / max_steps, epoch + 1, FLAGS.train_epochs, steps_per_sec, eta_seconds / 60, time_elapsed / 60)) if step % 20 == 0: logging.info(message) test_iterator = iter(test_dataset) for step in range(steps_per_eval): if step % 20 == 0: logging.info('Starting to run eval step %s of epoch: %s', step, epoch) test_start_time = time.time() test_step(test_iterator) ms_per_example = (time.time() - test_start_time) * 1e6 / test_batch_size metrics['test/ms_per_example'].update_state(ms_per_example) logging.info('Train Loss: %.4f, Accuracy: %.2f%%', metrics['train/loss'].result(), metrics['train/accuracy'].result() * 100) logging.info('Test NLL: %.4f, Accuracy: %.2f%%', metrics['test/negative_log_likelihood'].result(), metrics['test/accuracy'].result() * 100) for i in range(FLAGS.ensemble_size): logging.info( 'Member %d Test Loss: %.4f, Accuracy: %.2f%%', i, metrics['test/nll_member_{}'.format(i)].result(), metrics['test/accuracy_member_{}'.format(i)].result() * 100) total_metrics = metrics.copy() total_metrics.update(test_diversity) total_results = { name: metric.result() for name, metric in total_metrics.items() } with summary_writer.as_default(): for name, result in total_results.items(): tf.summary.scalar(name, result, step=epoch + 1) for _, metric in total_metrics.items(): metric.reset_states() if (FLAGS.checkpoint_interval > 0 and (epoch + 1) % FLAGS.checkpoint_interval == 0): checkpoint_name = checkpoint.save( os.path.join(FLAGS.output_dir, 'checkpoint')) logging.info('Saved checkpoint to %s', checkpoint_name) final_save_name = os.path.join(FLAGS.output_dir, 'model') model.save(final_save_name) logging.info('Saved model to %s', final_save_name)
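# A minimal sketch of the numerically-stable ensemble NLL computed in the
# MIMO test_step above. The ensemble's marginal likelihood is the average of
# per-member likelihoods, and its log is taken with logsumexp over per-member
# log-likelihoods instead of averaging probabilities first, which avoids
# underflow when individual members are very confident. Toy shapes and random
# inputs only.
import tensorflow as tf

batch_size, ensemble_size, num_classes = 4, 3, 10
logits = tf.random.normal([batch_size, ensemble_size, num_classes])
labels = tf.random.uniform([batch_size], maxval=num_classes, dtype=tf.int32)

labels_tiled = tf.tile(tf.expand_dims(labels, 1), [1, ensemble_size])
log_likelihoods = -tf.keras.losses.sparse_categorical_crossentropy(
    labels_tiled, logits, from_logits=True)  # [batch_size, ensemble_size]
negative_log_likelihood = tf.reduce_mean(
    -tf.reduce_logsumexp(log_likelihoods, axis=1) +
    tf.math.log(float(ensemble_size)))

# Reference computation on averaged probabilities; equal up to float error,
# but prone to underflow when per-member probabilities are tiny.
probs = tf.reduce_mean(tf.nn.softmax(logits), axis=1)
reference = tf.reduce_mean(
    tf.keras.losses.sparse_categorical_crossentropy(labels, probs))
tf.debugging.assert_near(negative_log_likelihood, reference, atol=1e-4)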
def main(argv): del argv # unused arg tf.compat.v1.enable_v2_behavior() tf.io.gfile.makedirs(FLAGS.output_dir) logging.info('Saving checkpoints at %s', FLAGS.output_dir) tf.random.set_seed(FLAGS.seed) if FLAGS.use_gpu: logging.info('Use GPU') strategy = tf.distribute.MirroredStrategy() else: logging.info('Use TPU at %s', FLAGS.tpu if FLAGS.tpu is not None else 'local') resolver = tf.distribute.cluster_resolver.TPUClusterResolver( tpu=FLAGS.tpu) tf.config.experimental_connect_to_cluster(resolver) tf.tpu.experimental.initialize_tpu_system(resolver) strategy = tf.distribute.experimental.TPUStrategy(resolver) def train_input_fn(ctx): """Sets up local (per-core) dataset batching.""" dataset = utils.load_distributed_dataset( split=tfds.Split.TRAIN, name=FLAGS.dataset, batch_size=FLAGS.per_core_batch_size // FLAGS.num_models, drop_remainder=True, use_bfloat16=FLAGS.use_bfloat16, proportion=FLAGS.train_proportion) if ctx and ctx.num_input_pipelines > 1: dataset = dataset.shard(ctx.num_input_pipelines, ctx.input_pipeline_id) return dataset # Regardless of the training proportion, we still evaluate the # model on the full test dataset. def test_input_fn(ctx): """Sets up local (per-core) dataset batching.""" dataset = utils.load_distributed_dataset( split=tfds.Split.TEST, name=FLAGS.dataset, batch_size=FLAGS.per_core_batch_size // FLAGS.num_models, drop_remainder=True, use_bfloat16=FLAGS.use_bfloat16) if ctx and ctx.num_input_pipelines > 1: dataset = dataset.shard(ctx.num_input_pipelines, ctx.input_pipeline_id) return dataset train_dataset = strategy.experimental_distribute_datasets_from_function( train_input_fn) test_dataset = strategy.experimental_distribute_datasets_from_function( test_input_fn) ds_info = tfds.builder(FLAGS.dataset).info batch_size = ((FLAGS.per_core_batch_size // FLAGS.num_models) * FLAGS.num_cores) # train_proportion is a float, so steps_per_epoch must be converted to int. 
steps_per_epoch = int( (ds_info.splits['train'].num_examples * FLAGS.train_proportion) // batch_size) steps_per_eval = ds_info.splits['test'].num_examples // batch_size if FLAGS.use_bfloat16: policy = tf.keras.mixed_precision.experimental.Policy('mixed_bfloat16') tf.keras.mixed_precision.experimental.set_policy(policy) summary_writer = tf.summary.create_file_writer( os.path.join(FLAGS.output_dir, 'summaries')) with strategy.scope(): logging.info('Building Keras ResNet-32 model') model = batchensemble_model.ensemble_resnet_v1( input_shape=ds_info.features['image'].shape, depth=32, num_classes=ds_info.features['label'].num_classes, width_multiplier=4, num_models=FLAGS.num_models, random_sign_init=FLAGS.random_sign_init, dropout_rate=FLAGS.dropout_rate, l2=FLAGS.l2) logging.info('Model input shape: %s', model.input_shape) logging.info('Model output shape: %s', model.output_shape) logging.info('Model number of weights: %s', model.count_params()) base_lr = FLAGS.base_learning_rate * batch_size / 128 lr_decay_epochs = [ np.floor(FLAGS.train_epochs / 200 * start_epoch) for start_epoch in FLAGS.lr_decay_epochs ] lr_schedule = utils.LearningRateSchedule( steps_per_epoch, base_lr, decay_ratio=FLAGS.lr_decay_ratio, decay_epochs=lr_decay_epochs, warmup_epochs=FLAGS.lr_warmup_epochs) optimizer = tf.keras.optimizers.SGD(lr_schedule, momentum=0.9, nesterov=True) metrics = { 'train/negative_log_likelihood': tf.keras.metrics.Mean(), 'train/accuracy': tf.keras.metrics.SparseCategoricalAccuracy(), 'train/loss': tf.keras.metrics.Mean(), 'test/negative_log_likelihood': tf.keras.metrics.Mean(), 'test/accuracy': tf.keras.metrics.SparseCategoricalAccuracy(), } for i in range(FLAGS.num_models): metrics['test/nll_member_{}'.format(i)] = tf.keras.metrics.Mean() metrics['test/accuracy_member_{}'.format(i)] = ( tf.keras.metrics.SparseCategoricalAccuracy()) checkpoint = tf.train.Checkpoint(model=model, optimizer=optimizer) latest_checkpoint = tf.train.latest_checkpoint(FLAGS.output_dir) initial_epoch = 0 if latest_checkpoint: # checkpoint.restore must be within a strategy.scope() so that optimizer # slot variables are mirrored. checkpoint.restore(latest_checkpoint) logging.info('Loaded checkpoint %s', latest_checkpoint) initial_epoch = optimizer.iterations.numpy() // steps_per_epoch @tf.function def train_step(iterator): """Training StepFn.""" def step_fn(inputs): """Per-Replica StepFn.""" images, labels = inputs if FLAGS.version2: images = tf.tile(images, [FLAGS.num_models, 1, 1, 1]) labels = tf.tile(labels, [FLAGS.num_models]) with tf.GradientTape() as tape: logits = model(images, training=True) if FLAGS.use_bfloat16: logits = tf.cast(logits, tf.float32) negative_log_likelihood = tf.reduce_mean( tf.keras.losses.sparse_categorical_crossentropy( labels, logits, from_logits=True)) l2_loss = sum(model.losses) loss = negative_log_likelihood + l2_loss # Scale the loss given the TPUStrategy will reduce sum all gradients. scaled_loss = loss / strategy.num_replicas_in_sync grads = tape.gradient(scaled_loss, model.trainable_variables) # Separate learning rate implementation. if FLAGS.fast_weight_lr_multiplier != 1.0: grads_and_vars = [] for grad, var in zip(grads, model.trainable_variables): # Apply different learning rate on the fast weight approximate # posterior/prior parameters. This excludes BN and slow weights, # but pay caution to the naming scheme. 
if ('batch_norm' not in var.name and 'kernel' not in var.name): grads_and_vars.append( (grad * FLAGS.fast_weight_lr_multiplier, var)) else: grads_and_vars.append((grad, var)) optimizer.apply_gradients(grads_and_vars) else: optimizer.apply_gradients(zip(grads, model.trainable_variables)) metrics['train/loss'].update_state(loss) metrics['train/negative_log_likelihood'].update_state( negative_log_likelihood) metrics['train/accuracy'].update_state(labels, logits) strategy.run(step_fn, args=(next(iterator), )) @tf.function def test_step(iterator): """Evaluation StepFn.""" def step_fn(inputs): """Per-Replica StepFn.""" images, labels = inputs images = tf.tile(images, [FLAGS.num_models, 1, 1, 1]) logits = model(images, training=False) if FLAGS.use_bfloat16: logits = tf.cast(logits, tf.float32) probs = tf.nn.softmax(logits) per_probs = tf.split(probs, num_or_size_splits=FLAGS.num_models, axis=0) for i in range(FLAGS.num_models): member_probs = per_probs[i] member_loss = tf.keras.losses.sparse_categorical_crossentropy( labels, member_probs) metrics['test/nll_member_{}'.format(i)].update_state( member_loss) metrics['test/accuracy_member_{}'.format(i)].update_state( labels, member_probs) probs = tf.reduce_mean(per_probs, axis=0) negative_log_likelihood = tf.reduce_mean( tf.keras.losses.sparse_categorical_crossentropy(labels, probs)) metrics['test/negative_log_likelihood'].update_state( negative_log_likelihood) metrics['test/accuracy'].update_state(labels, probs) strategy.run(step_fn, args=(next(iterator), )) train_iterator = iter(train_dataset) start_time = time.time() for epoch in range(initial_epoch, FLAGS.train_epochs): logging.info('Starting to run epoch: %s', epoch) for step in range(steps_per_epoch): train_step(train_iterator) current_step = epoch * steps_per_epoch + (step + 1) max_steps = steps_per_epoch * FLAGS.train_epochs time_elapsed = time.time() - start_time steps_per_sec = float(current_step) / time_elapsed eta_seconds = (max_steps - current_step) / steps_per_sec message = ('{:.1%} completion: epoch {:d}/{:d}. {:.1f} steps/s. ' 'ETA: {:.0f} min. Time elapsed: {:.0f} min'.format( current_step / max_steps, epoch + 1, FLAGS.train_epochs, steps_per_sec, eta_seconds / 60, time_elapsed / 60)) if step % 20 == 0: logging.info(message) test_iterator = iter(test_dataset) for step in range(steps_per_eval): if step % 20 == 0: logging.info('Starting to run eval step %s of epoch: %s', step, epoch) test_step(test_iterator) logging.info('Train Loss: %.4f, Accuracy: %.2f%%', metrics['train/loss'].result(), metrics['train/accuracy'].result() * 100) logging.info('Test NLL: %.4f, Accuracy: %.2f%%', metrics['test/negative_log_likelihood'].result(), metrics['test/accuracy'].result() * 100) for i in range(FLAGS.num_models): logging.info( 'Member %d Test Loss: %.4f, Accuracy: %.2f%%', i, metrics['test/nll_member_{}'.format(i)].result(), metrics['test/accuracy_member_{}'.format(i)].result() * 100) with summary_writer.as_default(): for name, metric in metrics.items(): tf.summary.scalar(name, metric.result(), step=epoch + 1) for metric in metrics.values(): metric.reset_states() if (epoch + 1) % 20 == 0: checkpoint_name = checkpoint.save( os.path.join(FLAGS.output_dir, 'checkpoint')) logging.info('Saved checkpoint to %s', checkpoint_name)
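# A minimal sketch of the separate-learning-rate trick in the BatchEnsemble
# train_step above: scaling a variable's gradient by a constant before
# apply_gradients is equivalent (for plain SGD) to giving that variable its
# own learning rate. The tiny model and multiplier are hypothetical; the name
# filter mirrors the one above, where fast weights are everything that is
# neither a kernel nor a batch-norm parameter.
import tensorflow as tf

fast_weight_lr_multiplier = 0.5  # hypothetical flag value
model = tf.keras.Sequential([
    tf.keras.layers.Dense(4, input_shape=(8,)),
    tf.keras.layers.BatchNormalization(),
    tf.keras.layers.Dense(2),
])
optimizer = tf.keras.optimizers.SGD(learning_rate=0.1)

x = tf.random.normal([16, 8])
with tf.GradientTape() as tape:
  loss = tf.reduce_mean(tf.square(model(x, training=True)))
grads = tape.gradient(loss, model.trainable_variables)

grads_and_vars = []
for grad, var in zip(grads, model.trainable_variables):
  if 'batch_norm' not in var.name and 'kernel' not in var.name:
    # Bias terms stand in for the fast weights in this toy model.
    grads_and_vars.append((grad * fast_weight_lr_multiplier, var))
  else:
    grads_and_vars.append((grad, var))
optimizer.apply_gradients(grads_and_vars)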
def main(argv): del argv # unused arg tf.io.gfile.makedirs(FLAGS.output_dir) logging.info('Saving checkpoints at %s', FLAGS.output_dir) tf.random.set_seed(FLAGS.seed) if FLAGS.use_gpu: logging.info('Use GPU') strategy = tf.distribute.MirroredStrategy() else: logging.info('Use TPU at %s', FLAGS.tpu if FLAGS.tpu is not None else 'local') resolver = tf.distribute.cluster_resolver.TPUClusterResolver( tpu=FLAGS.tpu) tf.config.experimental_connect_to_cluster(resolver) tf.tpu.experimental.initialize_tpu_system(resolver) strategy = tf.distribute.experimental.TPUStrategy(resolver) train_input_fn = utils.load_input_fn( split=tfds.Split.TRAIN, name=FLAGS.dataset, batch_size=FLAGS.per_core_batch_size // FLAGS.ensemble_size, use_bfloat16=FLAGS.use_bfloat16) clean_test_input_fn = utils.load_input_fn( split=tfds.Split.TEST, name=FLAGS.dataset, batch_size=FLAGS.per_core_batch_size // FLAGS.ensemble_size, use_bfloat16=FLAGS.use_bfloat16) train_dataset = strategy.experimental_distribute_datasets_from_function( train_input_fn) test_datasets = { 'clean': strategy.experimental_distribute_datasets_from_function( clean_test_input_fn), } if FLAGS.corruptions_interval > 0: if FLAGS.dataset == 'cifar10': load_c_input_fn = utils.load_cifar10_c_input_fn else: load_c_input_fn = functools.partial(utils.load_cifar100_c_input_fn, path=FLAGS.cifar100_c_path) corruption_types, max_intensity = utils.load_corrupted_test_info( FLAGS.dataset) for corruption in corruption_types: for intensity in range(1, max_intensity + 1): input_fn = load_c_input_fn( corruption_name=corruption, corruption_intensity=intensity, batch_size=FLAGS.per_core_batch_size // FLAGS.ensemble_size, use_bfloat16=FLAGS.use_bfloat16) test_datasets['{0}_{1}'.format(corruption, intensity)] = ( strategy.experimental_distribute_datasets_from_function( input_fn)) ds_info = tfds.builder(FLAGS.dataset).info batch_size = ((FLAGS.per_core_batch_size // FLAGS.ensemble_size) * FLAGS.num_cores) train_dataset_size = ds_info.splits['train'].num_examples steps_per_epoch = train_dataset_size // batch_size test_dataset_size = ds_info.splits['test'].num_examples steps_per_eval = test_dataset_size // batch_size num_classes = ds_info.features['label'].num_classes if FLAGS.use_bfloat16: policy = tf.keras.mixed_precision.experimental.Policy('mixed_bfloat16') tf.keras.mixed_precision.experimental.set_policy(policy) summary_writer = tf.summary.create_file_writer( os.path.join(FLAGS.output_dir, 'summaries')) with strategy.scope(): logging.info('Building Keras model') model = ub.models.wide_resnet_rank1( input_shape=ds_info.features['image'].shape, depth=28, width_multiplier=10, num_classes=num_classes, alpha_initializer=FLAGS.alpha_initializer, gamma_initializer=FLAGS.gamma_initializer, alpha_regularizer=FLAGS.alpha_regularizer, gamma_regularizer=FLAGS.gamma_regularizer, use_additive_perturbation=FLAGS.use_additive_perturbation, ensemble_size=FLAGS.ensemble_size, random_sign_init=FLAGS.random_sign_init, dropout_rate=FLAGS.dropout_rate, prior_mean=FLAGS.prior_mean, prior_stddev=FLAGS.prior_stddev) logging.info('Model input shape: %s', model.input_shape) logging.info('Model output shape: %s', model.output_shape) logging.info('Model number of weights: %s', model.count_params()) # Linearly scale learning rate and the decay epochs by vanilla settings. 
base_lr = FLAGS.base_learning_rate * batch_size / 128 lr_decay_epochs = [(int(start_epoch_str) * FLAGS.train_epochs) // 200 for start_epoch_str in FLAGS.lr_decay_epochs] lr_schedule = utils.LearningRateSchedule( steps_per_epoch, base_lr, decay_ratio=FLAGS.lr_decay_ratio, decay_epochs=lr_decay_epochs, warmup_epochs=FLAGS.lr_warmup_epochs) optimizer = tf.keras.optimizers.SGD(lr_schedule, momentum=0.9, nesterov=True) metrics = { 'train/negative_log_likelihood': tf.keras.metrics.Mean(), 'train/kl': tf.keras.metrics.Mean(), 'train/kl_scale': tf.keras.metrics.Mean(), 'train/elbo': tf.keras.metrics.Mean(), 'train/loss': tf.keras.metrics.Mean(), 'train/accuracy': tf.keras.metrics.SparseCategoricalAccuracy(), 'train/ece': um.ExpectedCalibrationError(num_bins=FLAGS.num_bins), 'test/negative_log_likelihood': tf.keras.metrics.Mean(), 'test/kl': tf.keras.metrics.Mean(), 'test/elbo': tf.keras.metrics.Mean(), 'test/accuracy': tf.keras.metrics.SparseCategoricalAccuracy(), 'test/ece': um.ExpectedCalibrationError(num_bins=FLAGS.num_bins), } if FLAGS.ensemble_size > 1: for i in range(FLAGS.ensemble_size): metrics['test/nll_member_{}'.format( i)] = tf.keras.metrics.Mean() metrics['test/accuracy_member_{}'.format(i)] = ( tf.keras.metrics.SparseCategoricalAccuracy()) if FLAGS.corruptions_interval > 0: corrupt_metrics = {} for intensity in range(1, max_intensity + 1): for corruption in corruption_types: dataset_name = '{0}_{1}'.format(corruption, intensity) corrupt_metrics['test/nll_{}'.format(dataset_name)] = ( tf.keras.metrics.Mean()) corrupt_metrics['test/kl_{}'.format(dataset_name)] = ( tf.keras.metrics.Mean()) corrupt_metrics['test/elbo_{}'.format(dataset_name)] = ( tf.keras.metrics.Mean()) corrupt_metrics['test/accuracy_{}'.format( dataset_name)] = ( tf.keras.metrics.SparseCategoricalAccuracy()) corrupt_metrics['test/ece_{}'.format(dataset_name)] = ( um.ExpectedCalibrationError(num_bins=FLAGS.num_bins)) checkpoint = tf.train.Checkpoint(model=model, optimizer=optimizer) latest_checkpoint = tf.train.latest_checkpoint(FLAGS.output_dir) initial_epoch = 0 if latest_checkpoint: # checkpoint.restore must be within a strategy.scope() so that optimizer # slot variables are mirrored. checkpoint.restore(latest_checkpoint) logging.info('Loaded checkpoint %s', latest_checkpoint) initial_epoch = optimizer.iterations.numpy() // steps_per_epoch def compute_l2_loss(model): filtered_variables = [] for var in model.trainable_variables: # Apply l2 on the BN parameters and bias terms. This # excludes only fast weight approximate posterior/prior parameters, # but pay caution to their naming scheme. 
if ('kernel' in var.name or 'batch_norm' in var.name or 'bias' in var.name): filtered_variables.append(tf.reshape(var, (-1, ))) l2_loss = FLAGS.l2 * 2 * tf.nn.l2_loss( tf.concat(filtered_variables, axis=0)) return l2_loss @tf.function def train_step(iterator): """Training StepFn.""" def step_fn(inputs): """Per-Replica StepFn.""" images, labels = inputs if FLAGS.ensemble_size > 1: images = tf.tile(images, [FLAGS.ensemble_size, 1, 1, 1]) labels = tf.tile(labels, [FLAGS.ensemble_size]) with tf.GradientTape() as tape: logits = model(images, training=True) if FLAGS.use_bfloat16: logits = tf.cast(logits, tf.float32) negative_log_likelihood = tf.reduce_mean( tf.keras.losses.sparse_categorical_crossentropy( labels, logits, from_logits=True)) l2_loss = compute_l2_loss(model) kl = sum(model.losses) / train_dataset_size kl_scale = tf.cast(optimizer.iterations + 1, kl.dtype) kl_scale /= steps_per_epoch * FLAGS.kl_annealing_epochs kl_scale = tf.minimum(1., kl_scale) kl_loss = kl_scale * kl # Scale the loss given the TPUStrategy will reduce sum all gradients. loss = negative_log_likelihood + l2_loss + kl_loss scaled_loss = loss / strategy.num_replicas_in_sync elbo = -(negative_log_likelihood + l2_loss + kl) grads = tape.gradient(scaled_loss, model.trainable_variables) # Separate learning rate implementation. if FLAGS.fast_weight_lr_multiplier != 1.0: grads_and_vars = [] for grad, var in zip(grads, model.trainable_variables): # Apply different learning rate on the fast weight approximate # posterior/prior parameters. This excludes BN and slow weights, # but pay caution to the naming scheme. if ('kernel' not in var.name and 'batch_norm' not in var.name and 'bias' not in var.name): grads_and_vars.append( (grad * FLAGS.fast_weight_lr_multiplier, var)) else: grads_and_vars.append((grad, var)) optimizer.apply_gradients(grads_and_vars) else: optimizer.apply_gradients(zip(grads, model.trainable_variables)) probs = tf.nn.softmax(logits) metrics['train/negative_log_likelihood'].update_state( negative_log_likelihood) metrics['train/kl'].update_state(kl) metrics['train/kl_scale'].update_state(kl_scale) metrics['train/elbo'].update_state(elbo) metrics['train/loss'].update_state(loss) metrics['train/accuracy'].update_state(labels, probs) metrics['train/ece'].update_state(labels, probs) strategy.run(step_fn, args=(next(iterator), )) @tf.function def test_step(iterator, dataset_name): """Evaluation StepFn.""" def step_fn(inputs): """Per-Replica StepFn.""" images, labels = inputs if FLAGS.ensemble_size > 1: images = tf.tile(images, [FLAGS.ensemble_size, 1, 1, 1]) logits = tf.reshape([ model(images, training=False) for _ in range(FLAGS.num_eval_samples) ], [FLAGS.num_eval_samples, FLAGS.ensemble_size, -1, num_classes]) if FLAGS.use_bfloat16: logits = tf.cast(logits, tf.float32) probs = tf.nn.softmax(logits) if FLAGS.ensemble_size > 1: per_probs = tf.reduce_mean(probs, axis=0) # marginalize samples for i in range(FLAGS.ensemble_size): member_probs = per_probs[i] member_loss = tf.keras.losses.sparse_categorical_crossentropy( labels, member_probs) metrics['test/nll_member_{}'.format(i)].update_state( member_loss) metrics['test/accuracy_member_{}'.format(i)].update_state( labels, member_probs) # Negative log marginal likelihood computed in a numerically-stable way. 
labels_broadcasted = tf.broadcast_to( labels, [FLAGS.num_eval_samples, FLAGS.ensemble_size, labels.shape[0]]) log_likelihoods = -tf.keras.losses.sparse_categorical_crossentropy( labels_broadcasted, logits, from_logits=True) negative_log_likelihood = tf.reduce_mean( -tf.reduce_logsumexp(log_likelihoods, axis=[0, 1]) + tf.math.log(float(FLAGS.num_eval_samples * FLAGS.ensemble_size))) probs = tf.math.reduce_mean(probs, axis=[0, 1]) # marginalize l2_loss = compute_l2_loss(model) kl = sum(model.losses) / test_dataset_size elbo = -(negative_log_likelihood + l2_loss + kl) if dataset_name == 'clean': metrics['test/negative_log_likelihood'].update_state( negative_log_likelihood) metrics['test/kl'].update_state(kl) metrics['test/elbo'].update_state(elbo) metrics['test/accuracy'].update_state(labels, probs) metrics['test/ece'].update_state(labels, probs) else: corrupt_metrics['test/nll_{}'.format( dataset_name)].update_state(negative_log_likelihood) corrupt_metrics['test/kl_{}'.format( dataset_name)].update_state(kl) corrupt_metrics['test/elbo_{}'.format( dataset_name)].update_state(elbo) corrupt_metrics['test/accuracy_{}'.format( dataset_name)].update_state(labels, probs) corrupt_metrics['test/ece_{}'.format( dataset_name)].update_state(labels, probs) strategy.run(step_fn, args=(next(iterator), )) train_iterator = iter(train_dataset) start_time = time.time() for epoch in range(initial_epoch, FLAGS.train_epochs): logging.info('Starting to run epoch: %s', epoch) for step in range(steps_per_epoch): train_step(train_iterator) current_step = epoch * steps_per_epoch + (step + 1) max_steps = steps_per_epoch * FLAGS.train_epochs time_elapsed = time.time() - start_time steps_per_sec = float(current_step) / time_elapsed eta_seconds = (max_steps - current_step) / steps_per_sec message = ('{:.1%} completion: epoch {:d}/{:d}. {:.1f} steps/s. ' 'ETA: {:.0f} min. 
Time elapsed: {:.0f} min'.format( current_step / max_steps, epoch + 1, FLAGS.train_epochs, steps_per_sec, eta_seconds / 60, time_elapsed / 60)) if step % 20 == 0: logging.info(message) datasets_to_evaluate = {'clean': test_datasets['clean']} if (FLAGS.corruptions_interval > 0 and (epoch + 1) % FLAGS.corruptions_interval == 0): datasets_to_evaluate = test_datasets for dataset_name, test_dataset in datasets_to_evaluate.items(): test_iterator = iter(test_dataset) logging.info('Testing on dataset %s', dataset_name) for step in range(steps_per_eval): if step % 20 == 0: logging.info('Starting to run eval step %s of epoch: %s', step, epoch) test_step(test_iterator, dataset_name) logging.info('Done with testing on %s', dataset_name) corrupt_results = {} if (FLAGS.corruptions_interval > 0 and (epoch + 1) % FLAGS.corruptions_interval == 0): corrupt_results = utils.aggregate_corrupt_metrics( corrupt_metrics, corruption_types, max_intensity) logging.info('Train Loss: %.4f, Accuracy: %.2f%%', metrics['train/loss'].result(), metrics['train/accuracy'].result() * 100) logging.info('Test NLL: %.4f, Accuracy: %.2f%%', metrics['test/negative_log_likelihood'].result(), metrics['test/accuracy'].result() * 100) if FLAGS.ensemble_size > 1: for i in range(FLAGS.ensemble_size): logging.info( 'Member %d Test Loss: %.4f, Accuracy: %.2f%%', i, metrics['test/nll_member_{}'.format(i)].result(), metrics['test/accuracy_member_{}'.format(i)].result() * 100) total_results = { name: metric.result() for name, metric in metrics.items() } total_results.update(corrupt_results) with summary_writer.as_default(): for name, result in total_results.items(): tf.summary.scalar(name, result, step=epoch + 1) for metric in metrics.values(): metric.reset_states() if (FLAGS.checkpoint_interval > 0 and (epoch + 1) % FLAGS.checkpoint_interval == 0): checkpoint_name = checkpoint.save( os.path.join(FLAGS.output_dir, 'checkpoint')) logging.info('Saved checkpoint to %s', checkpoint_name) final_checkpoint_name = checkpoint.save( os.path.join(FLAGS.output_dir, 'checkpoint')) logging.info('Saved last checkpoint to %s', final_checkpoint_name)
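# A minimal sketch of the linear KL annealing used in the rank-1 BNN
# train_step above: the KL term's weight ramps linearly from ~0 to 1 over
# kl_annealing_epochs, so the likelihood term dominates early optimization.
# The step counts below are hypothetical.
import tensorflow as tf

steps_per_epoch = 100    # hypothetical
kl_annealing_epochs = 5  # hypothetical flag value

def kl_scale_at(iterations):
  kl_scale = tf.cast(iterations + 1, tf.float32)
  kl_scale /= steps_per_epoch * kl_annealing_epochs
  return tf.minimum(1., kl_scale)

print(kl_scale_at(tf.constant(0)).numpy())    # 0.002 at the first step
print(kl_scale_at(tf.constant(499)).numpy())  # 1.0 once annealing finishes
print(kl_scale_at(tf.constant(999)).numpy())  # stays capped at 1.0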
def main(argv): del argv # unused arg tf.io.gfile.makedirs(FLAGS.output_dir) logging.info('Saving checkpoints at %s', FLAGS.output_dir) tf.random.set_seed(FLAGS.seed) if FLAGS.use_gpu: logging.info('Use GPU') strategy = tf.distribute.MirroredStrategy() else: logging.info('Use TPU at %s', FLAGS.tpu if FLAGS.tpu is not None else 'local') resolver = tf.distribute.cluster_resolver.TPUClusterResolver( tpu=FLAGS.tpu) tf.config.experimental_connect_to_cluster(resolver) tf.tpu.experimental.initialize_tpu_system(resolver) strategy = tf.distribute.TPUStrategy(resolver) ds_info = tfds.builder(FLAGS.dataset).info batch_size = (FLAGS.per_core_batch_size * FLAGS.num_cores // FLAGS.num_dropout_samples_training) test_batch_size = FLAGS.per_core_batch_size * FLAGS.num_cores num_classes = ds_info.features['label'].num_classes aug_params = { 'augmix': FLAGS.augmix, 'aug_count': FLAGS.aug_count, 'augmix_depth': FLAGS.augmix_depth, 'augmix_prob_coeff': FLAGS.augmix_prob_coeff, 'augmix_width': FLAGS.augmix_width, 'ensemble_size': 1, 'mixup_alpha': FLAGS.mixup_alpha, } validation_proportion = 1. - FLAGS.train_proportion use_validation_set = validation_proportion > 0. train_dataset = data_utils.load_dataset( split=tfds.Split.TRAIN, name=FLAGS.dataset, batch_size=batch_size, use_bfloat16=FLAGS.use_bfloat16, aug_params=aug_params, validation_set=use_validation_set, validation_proportion=validation_proportion) train_sample_size = ds_info.splits[ 'train'].num_examples * FLAGS.train_proportion val_sample_size = ds_info.splits['train'].num_examples - train_sample_size steps_per_epoch = int(train_sample_size / batch_size) if use_validation_set: validation_dataset = data_utils.load_dataset( split=tfds.Split.VALIDATION, name=FLAGS.dataset, batch_size=batch_size, use_bfloat16=FLAGS.use_bfloat16, aug_params=aug_params, validation_set=use_validation_set, validation_proportion=validation_proportion) validation_dataset = strategy.experimental_distribute_dataset( validation_dataset) steps_per_val = int(val_sample_size / batch_size) clean_test_dataset = utils.load_dataset(split=tfds.Split.TEST, name=FLAGS.dataset, batch_size=test_batch_size, use_bfloat16=FLAGS.use_bfloat16) steps_per_eval = ds_info.splits['test'].num_examples // test_batch_size train_dataset = strategy.experimental_distribute_dataset(train_dataset) test_datasets = { 'clean': strategy.experimental_distribute_dataset(clean_test_dataset), } if FLAGS.corruptions_interval > 0: if FLAGS.dataset == 'cifar10': load_c_dataset = utils.load_cifar10_c else: load_c_dataset = functools.partial(utils.load_cifar100_c, path=FLAGS.cifar100_c_path) corruption_types, max_intensity = utils.load_corrupted_test_info( FLAGS.dataset) for corruption in corruption_types: for intensity in range(1, max_intensity + 1): dataset = load_c_dataset(corruption_name=corruption, corruption_intensity=intensity, batch_size=test_batch_size, use_bfloat16=FLAGS.use_bfloat16) test_datasets['{0}_{1}'.format(corruption, intensity)] = ( strategy.experimental_distribute_dataset(dataset)) if FLAGS.use_bfloat16: policy = tf.keras.mixed_precision.experimental.Policy('mixed_bfloat16') tf.keras.mixed_precision.experimental.set_policy(policy) summary_writer = tf.summary.create_file_writer( os.path.join(FLAGS.output_dir, 'summaries')) with strategy.scope(): logging.info('Building ResNet model') if FLAGS.use_spec_norm: logging.info('Use Spectral 
Normalization with norm bound %.2f', FLAGS.spec_norm_bound) if FLAGS.use_gp_layer: logging.info('Use GP layer with hidden units %d', FLAGS.gp_hidden_dim) model = ub.models.wide_resnet_sngp( input_shape=ds_info.features['image'].shape, batch_size=batch_size, depth=28, width_multiplier=10, num_classes=num_classes, l2=FLAGS.l2, use_mc_dropout=FLAGS.use_mc_dropout, use_filterwise_dropout=FLAGS.use_filterwise_dropout, dropout_rate=FLAGS.dropout_rate, use_gp_layer=FLAGS.use_gp_layer, gp_input_dim=FLAGS.gp_input_dim, gp_hidden_dim=FLAGS.gp_hidden_dim, gp_scale=FLAGS.gp_scale, gp_bias=FLAGS.gp_bias, gp_input_normalization=FLAGS.gp_input_normalization, gp_random_feature_type=FLAGS.gp_random_feature_type, gp_cov_discount_factor=FLAGS.gp_cov_discount_factor, gp_cov_ridge_penalty=FLAGS.gp_cov_ridge_penalty, use_spec_norm=FLAGS.use_spec_norm, spec_norm_iteration=FLAGS.spec_norm_iteration, spec_norm_bound=FLAGS.spec_norm_bound) logging.info('Model input shape: %s', model.input_shape) logging.info('Model output shape: %s', model.output_shape) logging.info('Model number of weights: %s', model.count_params()) # Linearly scale learning rate and the decay epochs by vanilla settings. base_lr = FLAGS.base_learning_rate * batch_size / 128 lr_decay_epochs = [(int(start_epoch_str) * FLAGS.train_epochs) // 200 for start_epoch_str in FLAGS.lr_decay_epochs] lr_schedule = utils.LearningRateSchedule( steps_per_epoch, base_lr, decay_ratio=FLAGS.lr_decay_ratio, decay_epochs=lr_decay_epochs, warmup_epochs=FLAGS.lr_warmup_epochs) optimizer = tf.keras.optimizers.SGD(lr_schedule, momentum=0.9, nesterov=True) metrics = { 'train/negative_log_likelihood': tf.keras.metrics.Mean(), 'train/accuracy': tf.keras.metrics.SparseCategoricalAccuracy(), 'train/loss': tf.keras.metrics.Mean(), 'train/ece': um.ExpectedCalibrationError(num_bins=FLAGS.num_bins), 'test/negative_log_likelihood': tf.keras.metrics.Mean(), 'test/accuracy': tf.keras.metrics.SparseCategoricalAccuracy(), 'test/ece': um.ExpectedCalibrationError(num_bins=FLAGS.num_bins), 'test/stddev': tf.keras.metrics.Mean(), } if use_validation_set: metrics.update({ 'val/negative_log_likelihood': tf.keras.metrics.Mean(), 'val/accuracy': tf.keras.metrics.SparseCategoricalAccuracy(), 'val/ece': um.ExpectedCalibrationError(num_bins=FLAGS.num_bins), 'val/stddev': tf.keras.metrics.Mean(), }) if FLAGS.corruptions_interval > 0: corrupt_metrics = {} for intensity in range(1, max_intensity + 1): for corruption in corruption_types: dataset_name = '{0}_{1}'.format(corruption, intensity) corrupt_metrics['test/nll_{}'.format(dataset_name)] = ( tf.keras.metrics.Mean()) corrupt_metrics['test/accuracy_{}'.format( dataset_name)] = ( tf.keras.metrics.SparseCategoricalAccuracy()) corrupt_metrics['test/ece_{}'.format(dataset_name)] = ( um.ExpectedCalibrationError(num_bins=FLAGS.num_bins)) corrupt_metrics['test/stddev_{}'.format(dataset_name)] = ( tf.keras.metrics.Mean()) checkpoint = tf.train.Checkpoint(model=model, optimizer=optimizer) latest_checkpoint = tf.train.latest_checkpoint(FLAGS.output_dir) initial_epoch = 0 if latest_checkpoint: # checkpoint.restore must be within a strategy.scope() so that optimizer # slot variables are mirrored. 
checkpoint.restore(latest_checkpoint) logging.info('Loaded checkpoint %s', latest_checkpoint) initial_epoch = optimizer.iterations.numpy() // steps_per_epoch @tf.function def train_step(iterator, step): """Training StepFn.""" def step_fn(inputs, step): """Per-Replica StepFn.""" images, labels = inputs if tf.equal(step, 0) and FLAGS.gp_cov_discount_factor < 0: # Resetting covariance estimator at the beginning of a new epoch. model.layers[-1].reset_covariance_matrix() if FLAGS.augmix and FLAGS.aug_count >= 1: # Index 0 at augmix preprocessing is the unperturbed image. images = images[:, 1, ...] # This is for the case of combining AugMix and Mixup. if FLAGS.mixup_alpha > 0: labels = tf.split(labels, FLAGS.aug_count + 1, axis=0)[1] images = tf.tile(images, [FLAGS.num_dropout_samples_training, 1, 1, 1]) if FLAGS.mixup_alpha > 0: labels = tf.tile(labels, [FLAGS.num_dropout_samples_training, 1]) else: labels = tf.tile(labels, [FLAGS.num_dropout_samples_training]) with tf.GradientTape() as tape: logits = model(images, training=True) if isinstance(logits, tuple): # If model returns a tuple of (logits, covmat), extract logits logits, _ = logits if FLAGS.use_bfloat16: logits = tf.cast(logits, tf.float32) if FLAGS.mixup_alpha > 0: negative_log_likelihood = tf.reduce_mean( tf.keras.losses.categorical_crossentropy( labels, logits, from_logits=True)) else: negative_log_likelihood = tf.reduce_mean( tf.keras.losses.sparse_categorical_crossentropy( labels, logits, from_logits=True)) l2_loss = sum(model.losses) loss = negative_log_likelihood + l2_loss # Scale the loss given the TPUStrategy will reduce sum all gradients. scaled_loss = loss / strategy.num_replicas_in_sync grads = tape.gradient(scaled_loss, model.trainable_variables) optimizer.apply_gradients(zip(grads, model.trainable_variables)) probs = tf.nn.softmax(logits) if FLAGS.mixup_alpha > 0: labels = tf.argmax(labels, axis=-1) metrics['train/ece'].update_state(labels, probs) metrics['train/loss'].update_state(loss) metrics['train/negative_log_likelihood'].update_state( negative_log_likelihood) metrics['train/accuracy'].update_state(labels, logits) strategy.run(step_fn, args=(next(iterator), step)) @tf.function def test_step(iterator, dataset_name): """Evaluation StepFn.""" def step_fn(inputs): """Per-Replica StepFn.""" images, labels = inputs logits_list = [] stddev_list = [] for _ in range(FLAGS.num_dropout_samples): logits = model(images, training=False) if isinstance(logits, tuple): # If model returns a tuple of (logits, covmat), extract both logits, covmat = logits else: covmat = tf.eye(FLAGS.per_core_batch_size) if FLAGS.use_bfloat16: logits = tf.cast(logits, tf.float32) logits = ed.layers.utils.mean_field_logits( logits, covmat, mean_field_factor=FLAGS.gp_mean_field_factor) stddev = tf.sqrt(tf.linalg.diag_part(covmat)) stddev_list.append(stddev) logits_list.append(logits) # Logits dimension is (num_samples, batch_size, num_classes). 
      logits_list = tf.stack(logits_list, axis=0)
      stddev_list = tf.stack(stddev_list, axis=0)

      stddev = tf.reduce_mean(stddev_list, axis=0)
      probs_list = tf.nn.softmax(logits_list)
      probs = tf.reduce_mean(probs_list, axis=0)

      labels_broadcasted = tf.broadcast_to(
          labels, [FLAGS.num_dropout_samples, labels.shape[0]])
      log_likelihoods = -tf.keras.losses.sparse_categorical_crossentropy(
          labels_broadcasted, logits_list, from_logits=True)
      negative_log_likelihood = tf.reduce_mean(
          -tf.reduce_logsumexp(log_likelihoods, axis=[0]) +
          tf.math.log(float(FLAGS.num_dropout_samples)))

      if dataset_name == 'clean':
        metrics['test/negative_log_likelihood'].update_state(
            negative_log_likelihood)
        metrics['test/accuracy'].update_state(labels, probs)
        metrics['test/ece'].update_state(labels, probs)
        metrics['test/stddev'].update_state(stddev)
      elif dataset_name == 'val':
        metrics['val/negative_log_likelihood'].update_state(
            negative_log_likelihood)
        metrics['val/accuracy'].update_state(labels, probs)
        metrics['val/ece'].update_state(labels, probs)
        metrics['val/stddev'].update_state(stddev)
      else:
        corrupt_metrics['test/nll_{}'.format(dataset_name)].update_state(
            negative_log_likelihood)
        corrupt_metrics['test/accuracy_{}'.format(dataset_name)].update_state(
            labels, probs)
        corrupt_metrics['test/ece_{}'.format(dataset_name)].update_state(
            labels, probs)
        corrupt_metrics['test/stddev_{}'.format(dataset_name)].update_state(
            stddev)

    strategy.run(step_fn, args=(next(iterator),))

  metrics.update({'test/ms_per_example': tf.keras.metrics.Mean()})

  step_variable = tf.Variable(0, dtype=tf.int32)
  train_iterator = iter(train_dataset)
  start_time = time.time()
  for epoch in range(initial_epoch, FLAGS.train_epochs):
    logging.info('Starting to run epoch: %s', epoch)
    for step in range(steps_per_epoch):
      step_variable.assign(step)
      # Pass `step` as a tf.Variable so the tf.function train_step() does not
      # retrace (re-compile) at every call.
      train_step(train_iterator, step_variable)

      current_step = epoch * steps_per_epoch + (step + 1)
      max_steps = steps_per_epoch * FLAGS.train_epochs
      time_elapsed = time.time() - start_time
      steps_per_sec = float(current_step) / time_elapsed
      eta_seconds = (max_steps - current_step) / steps_per_sec
      message = ('{:.1%} completion: epoch {:d}/{:d}. {:.1f} steps/s. '
                 'ETA: {:.0f} min. Time elapsed: {:.0f} min'.format(
                     current_step / max_steps, epoch + 1, FLAGS.train_epochs,
                     steps_per_sec, eta_seconds / 60, time_elapsed / 60))
      if step % 20 == 0:
        logging.info(message)

    datasets_to_evaluate = {'clean': test_datasets['clean']}
    if use_validation_set:
      datasets_to_evaluate['val'] = validation_dataset
    if (FLAGS.corruptions_interval > 0 and
        (epoch + 1) % FLAGS.corruptions_interval == 0):
      datasets_to_evaluate = test_datasets
    for dataset_name, test_dataset in datasets_to_evaluate.items():
      test_iterator = iter(test_dataset)
      logging.info('Testing on dataset %s', dataset_name)
      # Use a local step count so that evaluating the validation split does
      # not overwrite steps_per_eval for the remaining test splits.
      num_eval_steps = (
          steps_per_val if dataset_name == 'val' else steps_per_eval)
      for step in range(num_eval_steps):
        if step % 20 == 0:
          logging.info('Starting to run eval step %s of epoch: %s', step,
                       epoch)
        test_start_time = time.time()
        test_step(test_iterator, dataset_name)
        ms_per_example = (time.time() - test_start_time) * 1e6 / batch_size
        metrics['test/ms_per_example'].update_state(ms_per_example)
      logging.info('Done with testing on %s', dataset_name)

    corrupt_results = {}
    if (FLAGS.corruptions_interval > 0 and
        (epoch + 1) % FLAGS.corruptions_interval == 0):
      corrupt_results = utils.aggregate_corrupt_metrics(
          corrupt_metrics, corruption_types, max_intensity)
    logging.info('Train Loss: %.4f, Accuracy: %.2f%%',
                 metrics['train/loss'].result(),
                 metrics['train/accuracy'].result() * 100)
    if use_validation_set:
      logging.info('Val NLL: %.4f, Accuracy: %.2f%%',
                   metrics['val/negative_log_likelihood'].result(),
                   metrics['val/accuracy'].result() * 100)
    logging.info('Test NLL: %.4f, Accuracy: %.2f%%',
                 metrics['test/negative_log_likelihood'].result(),
                 metrics['test/accuracy'].result() * 100)
    total_results = {
        name: metric.result() for name, metric in metrics.items()
    }
    total_results.update(corrupt_results)
    with summary_writer.as_default():
      for name, result in total_results.items():
        tf.summary.scalar(name, result, step=epoch + 1)

    for metric in metrics.values():
      metric.reset_states()

    if (FLAGS.checkpoint_interval > 0 and
        (epoch + 1) % FLAGS.checkpoint_interval == 0):
      checkpoint_name = checkpoint.save(
          os.path.join(FLAGS.output_dir, 'checkpoint'))
      logging.info('Saved checkpoint to %s', checkpoint_name)

  final_checkpoint_name = checkpoint.save(
      os.path.join(FLAGS.output_dir, 'checkpoint'))
  logging.info('Saved last checkpoint to %s', final_checkpoint_name)
  final_save_name = os.path.join(FLAGS.output_dir, 'model')
  model.save(final_save_name)
  logging.info('Saved model to %s', final_save_name)

def main(argv):
  del argv  # unused arg
  tf.io.gfile.makedirs(FLAGS.output_dir)
  logging.info('Saving checkpoints at %s', FLAGS.output_dir)
  tf.random.set_seed(FLAGS.seed)

  if FLAGS.use_gpu:
    logging.info('Use GPU')
    strategy = tf.distribute.MirroredStrategy()
  else:
    logging.info('Use TPU at %s',
                 FLAGS.tpu if FLAGS.tpu is not None else 'local')
    resolver = tf.distribute.cluster_resolver.TPUClusterResolver(tpu=FLAGS.tpu)
    tf.config.experimental_connect_to_cluster(resolver)
    tf.tpu.experimental.initialize_tpu_system(resolver)
    strategy = tf.distribute.TPUStrategy(resolver)

  ds_info = tfds.builder(FLAGS.dataset).info
  # The train batch is divided by num_dropout_samples_training because each
  # example is tiled that many times in train_step.
  batch_size = (FLAGS.per_core_batch_size * FLAGS.num_cores //
                FLAGS.num_dropout_samples_training)
  test_batch_size = FLAGS.per_core_batch_size * FLAGS.num_cores
  steps_per_epoch = ds_info.splits['train'].num_examples // batch_size
  steps_per_eval = ds_info.splits['test'].num_examples // test_batch_size
  num_classes = ds_info.features['label'].num_classes

  train_dataset = utils.load_dataset(
      split=tfds.Split.TRAIN,
      name=FLAGS.dataset,
      batch_size=batch_size,
      use_bfloat16=FLAGS.use_bfloat16)
  clean_test_dataset = utils.load_dataset(
      split=tfds.Split.TEST,
      name=FLAGS.dataset,
      batch_size=test_batch_size,
      use_bfloat16=FLAGS.use_bfloat16)
  train_dataset = strategy.experimental_distribute_dataset(train_dataset)
  test_datasets = {
      'clean': strategy.experimental_distribute_dataset(clean_test_dataset),
  }
  if FLAGS.corruptions_interval > 0:
    if FLAGS.dataset == 'cifar10':
      load_c_dataset = utils.load_cifar10_c
    else:
      load_c_dataset = functools.partial(
          utils.load_cifar100_c, path=FLAGS.cifar100_c_path)
    corruption_types, max_intensity = utils.load_corrupted_test_info(
        FLAGS.dataset)
    for corruption in corruption_types:
      for intensity in range(1, max_intensity + 1):
        dataset = load_c_dataset(
            corruption_name=corruption,
            corruption_intensity=intensity,
            batch_size=test_batch_size,
            use_bfloat16=FLAGS.use_bfloat16)
        test_datasets['{0}_{1}'.format(corruption, intensity)] = (
            strategy.experimental_distribute_dataset(dataset))

  if FLAGS.use_bfloat16:
    policy = tf.keras.mixed_precision.experimental.Policy('mixed_bfloat16')
    tf.keras.mixed_precision.experimental.set_policy(policy)

  summary_writer = tf.summary.create_file_writer(
      os.path.join(FLAGS.output_dir, 'summaries'))

  with strategy.scope():
    logging.info('Building ResNet model')
    model = ub.models.wide_resnet_dropout(
        input_shape=ds_info.features['image'].shape,
        depth=28,
        width_multiplier=10,
        num_classes=num_classes,
        l2=FLAGS.l2,
        dropout_rate=FLAGS.dropout_rate,
        residual_dropout=FLAGS.residual_dropout,
        filterwise_dropout=FLAGS.filterwise_dropout)
    logging.info('Model input shape: %s', model.input_shape)
    logging.info('Model output shape: %s', model.output_shape)
    logging.info('Model number of weights: %s', model.count_params())
    # Linearly scale the learning rate and the decay epochs by the vanilla
    # settings.
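    # Worked example of the scaling rule (illustrative numbers, not values
    # taken from this file): with per_core_batch_size=64, num_cores=8 and
    # num_dropout_samples_training=1, batch_size = 64 * 8 // 1 = 512, so a
    # base_learning_rate of 0.1 becomes 0.1 * 512 / 128 = 0.4, and a decay
    # epoch given as '80' under train_epochs=200 maps to (80 * 200) // 200
    # = 80.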
    base_lr = FLAGS.base_learning_rate * batch_size / 128
    lr_decay_epochs = [(int(start_epoch_str) * FLAGS.train_epochs) // 200
                       for start_epoch_str in FLAGS.lr_decay_epochs]
    lr_schedule = utils.LearningRateSchedule(
        steps_per_epoch,
        base_lr,
        decay_ratio=FLAGS.lr_decay_ratio,
        decay_epochs=lr_decay_epochs,
        warmup_epochs=FLAGS.lr_warmup_epochs)
    optimizer = tf.keras.optimizers.SGD(
        lr_schedule, momentum=0.9, nesterov=True)
    metrics = {
        'train/negative_log_likelihood': tf.keras.metrics.Mean(),
        'train/accuracy': tf.keras.metrics.SparseCategoricalAccuracy(),
        'train/loss': tf.keras.metrics.Mean(),
        'train/ece': um.ExpectedCalibrationError(num_bins=FLAGS.num_bins),
        'test/negative_log_likelihood': tf.keras.metrics.Mean(),
        'test/accuracy': tf.keras.metrics.SparseCategoricalAccuracy(),
        'test/ece': um.ExpectedCalibrationError(num_bins=FLAGS.num_bins),
    }
    if FLAGS.corruptions_interval > 0:
      corrupt_metrics = {}
      for intensity in range(1, max_intensity + 1):
        for corruption in corruption_types:
          dataset_name = '{0}_{1}'.format(corruption, intensity)
          corrupt_metrics['test/nll_{}'.format(dataset_name)] = (
              tf.keras.metrics.Mean())
          corrupt_metrics['test/accuracy_{}'.format(dataset_name)] = (
              tf.keras.metrics.SparseCategoricalAccuracy())
          corrupt_metrics['test/ece_{}'.format(dataset_name)] = (
              um.ExpectedCalibrationError(num_bins=FLAGS.num_bins))

    checkpoint = tf.train.Checkpoint(model=model, optimizer=optimizer)
    latest_checkpoint = tf.train.latest_checkpoint(FLAGS.output_dir)
    initial_epoch = 0
    if latest_checkpoint:
      # checkpoint.restore must be within a strategy.scope() so that optimizer
      # slot variables are mirrored.
      checkpoint.restore(latest_checkpoint)
      logging.info('Loaded checkpoint %s', latest_checkpoint)
      initial_epoch = optimizer.iterations.numpy() // steps_per_epoch

  @tf.function
  def train_step(iterator):
    """Training StepFn."""

    def step_fn(inputs):
      """Per-Replica StepFn."""
      images, labels = inputs
      # Tile the batch so each example is evaluated under several independent
      # dropout masks within a single forward pass.
      images = tf.tile(images, [FLAGS.num_dropout_samples_training, 1, 1, 1])
      labels = tf.tile(labels, [FLAGS.num_dropout_samples_training])

      with tf.GradientTape() as tape:
        logits = model(images, training=True)
        if FLAGS.use_bfloat16:
          logits = tf.cast(logits, tf.float32)
        negative_log_likelihood = tf.reduce_mean(
            tf.keras.losses.sparse_categorical_crossentropy(
                labels, logits, from_logits=True))
        l2_loss = sum(model.losses)
        loss = negative_log_likelihood + l2_loss
        # Scale the loss, given that TPUStrategy will reduce-sum the
        # gradients across replicas.
        scaled_loss = loss / strategy.num_replicas_in_sync

      grads = tape.gradient(scaled_loss, model.trainable_variables)
      optimizer.apply_gradients(zip(grads, model.trainable_variables))

      probs = tf.nn.softmax(logits)
      metrics['train/ece'].update_state(labels, probs)
      metrics['train/loss'].update_state(loss)
      metrics['train/negative_log_likelihood'].update_state(
          negative_log_likelihood)
      metrics['train/accuracy'].update_state(labels, logits)

    strategy.run(step_fn, args=(next(iterator),))

  @tf.function
  def test_step(iterator, dataset_name):
    """Evaluation StepFn."""

    def step_fn(inputs):
      """Per-Replica StepFn."""
      images, labels = inputs
      logits_list = []
      for _ in range(FLAGS.num_dropout_samples):
        logits = model(images, training=False)
        if FLAGS.use_bfloat16:
          logits = tf.cast(logits, tf.float32)
        logits_list.append(logits)

      # Logits dimension is (num_samples, batch_size, num_classes).
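      # Explanatory note (added comment): assuming the model's dropout layers
      # remain active at inference, as MC dropout requires, each of the
      # num_dropout_samples forward passes draws a fresh dropout mask, and
      # averaging the resulting softmax probabilities below approximates the
      # posterior predictive mean E_theta[p(y | x, theta)].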
      logits_list = tf.stack(logits_list, axis=0)
      probs_list = tf.nn.softmax(logits_list)
      probs = tf.reduce_mean(probs_list, axis=0)

      labels_broadcasted = tf.broadcast_to(
          labels, [FLAGS.num_dropout_samples, labels.shape[0]])
      log_likelihoods = -tf.keras.losses.sparse_categorical_crossentropy(
          labels_broadcasted, logits_list, from_logits=True)
      negative_log_likelihood = tf.reduce_mean(
          -tf.reduce_logsumexp(log_likelihoods, axis=[0]) +
          tf.math.log(float(FLAGS.num_dropout_samples)))

      if dataset_name == 'clean':
        metrics['test/negative_log_likelihood'].update_state(
            negative_log_likelihood)
        metrics['test/accuracy'].update_state(labels, probs)
        metrics['test/ece'].update_state(labels, probs)
      else:
        corrupt_metrics['test/nll_{}'.format(dataset_name)].update_state(
            negative_log_likelihood)
        corrupt_metrics['test/accuracy_{}'.format(dataset_name)].update_state(
            labels, probs)
        corrupt_metrics['test/ece_{}'.format(dataset_name)].update_state(
            labels, probs)

    strategy.run(step_fn, args=(next(iterator),))

  metrics.update({'test/ms_per_example': tf.keras.metrics.Mean()})

  train_iterator = iter(train_dataset)
  start_time = time.time()
  for epoch in range(initial_epoch, FLAGS.train_epochs):
    logging.info('Starting to run epoch: %s', epoch)
    for step in range(steps_per_epoch):
      train_step(train_iterator)

      current_step = epoch * steps_per_epoch + (step + 1)
      max_steps = steps_per_epoch * FLAGS.train_epochs
      time_elapsed = time.time() - start_time
      steps_per_sec = float(current_step) / time_elapsed
      eta_seconds = (max_steps - current_step) / steps_per_sec
      message = ('{:.1%} completion: epoch {:d}/{:d}. {:.1f} steps/s. '
                 'ETA: {:.0f} min. Time elapsed: {:.0f} min'.format(
                     current_step / max_steps, epoch + 1, FLAGS.train_epochs,
                     steps_per_sec, eta_seconds / 60, time_elapsed / 60))
      if step % 20 == 0:
        logging.info(message)

    datasets_to_evaluate = {'clean': test_datasets['clean']}
    if (FLAGS.corruptions_interval > 0 and
        (epoch + 1) % FLAGS.corruptions_interval == 0):
      datasets_to_evaluate = test_datasets
    for dataset_name, test_dataset in datasets_to_evaluate.items():
      test_iterator = iter(test_dataset)
      logging.info('Testing on dataset %s', dataset_name)
      for step in range(steps_per_eval):
        if step % 20 == 0:
          logging.info('Starting to run eval step %s of epoch: %s', step,
                       epoch)
        test_start_time = time.time()
        test_step(test_iterator, dataset_name)
        ms_per_example = (time.time() - test_start_time) * 1e6 / batch_size
        metrics['test/ms_per_example'].update_state(ms_per_example)
      logging.info('Done with testing on %s', dataset_name)

    corrupt_results = {}
    if (FLAGS.corruptions_interval > 0 and
        (epoch + 1) % FLAGS.corruptions_interval == 0):
      corrupt_results = utils.aggregate_corrupt_metrics(
          corrupt_metrics, corruption_types, max_intensity)
    logging.info('Train Loss: %.4f, Accuracy: %.2f%%',
                 metrics['train/loss'].result(),
                 metrics['train/accuracy'].result() * 100)
    logging.info('Test NLL: %.4f, Accuracy: %.2f%%',
                 metrics['test/negative_log_likelihood'].result(),
                 metrics['test/accuracy'].result() * 100)
    total_results = {
        name: metric.result() for name, metric in metrics.items()
    }
    total_results.update(corrupt_results)
    with summary_writer.as_default():
      for name, result in total_results.items():
        tf.summary.scalar(name, result, step=epoch + 1)

    for metric in metrics.values():
      metric.reset_states()

    if (FLAGS.checkpoint_interval > 0 and
        (epoch + 1) % FLAGS.checkpoint_interval == 0):
      checkpoint_name = checkpoint.save(
          os.path.join(FLAGS.output_dir, 'checkpoint'))
      logging.info('Saved checkpoint to %s', checkpoint_name)

  final_checkpoint_name = checkpoint.save(
      os.path.join(FLAGS.output_dir, 'checkpoint'))
  logging.info('Saved last checkpoint to %s', final_checkpoint_name)

def main(argv):
  del argv  # unused arg
  tf.enable_v2_behavior()
  tf.io.gfile.makedirs(FLAGS.output_dir)
  logging.info('Saving checkpoints at %s', FLAGS.output_dir)
  tf.random.set_seed(FLAGS.seed)

  if FLAGS.use_gpu:
    logging.info('Use GPU')
    strategy = tf.distribute.MirroredStrategy()
  else:
    logging.info('Use TPU at %s',
                 FLAGS.tpu if FLAGS.tpu is not None else 'local')
    resolver = tf.distribute.cluster_resolver.TPUClusterResolver(tpu=FLAGS.tpu)
    tf.config.experimental_connect_to_cluster(resolver)
    tf.tpu.experimental.initialize_tpu_system(resolver)
    strategy = tf.distribute.experimental.TPUStrategy(resolver)

  def train_input_fn(ctx):
    """Sets up local (per-core) dataset batching."""
    dataset = utils.load_distributed_dataset(
        split=tfds.Split.TRAIN,
        name=FLAGS.dataset,
        batch_size=FLAGS.per_core_batch_size,
        drop_remainder=True,
        use_bfloat16=FLAGS.use_bfloat16,
        normalize=True)
    if ctx and ctx.num_input_pipelines > 1:
      dataset = dataset.shard(ctx.num_input_pipelines, ctx.input_pipeline_id)
    return dataset

  def test_input_fn(ctx):
    """Sets up local (per-core) dataset batching."""
    dataset = utils.load_distributed_dataset(
        split=tfds.Split.TEST,
        name=FLAGS.dataset,
        batch_size=FLAGS.per_core_batch_size,
        drop_remainder=True,
        use_bfloat16=FLAGS.use_bfloat16,
        normalize=True)
    if ctx and ctx.num_input_pipelines > 1:
      dataset = dataset.shard(ctx.num_input_pipelines, ctx.input_pipeline_id)
    return dataset

  train_dataset = strategy.experimental_distribute_datasets_from_function(
      train_input_fn)
  test_dataset = strategy.experimental_distribute_datasets_from_function(
      test_input_fn)
  ds_info = tfds.builder(FLAGS.dataset).info
  batch_size = FLAGS.per_core_batch_size * FLAGS.num_cores
  steps_per_epoch = ds_info.splits['train'].num_examples // batch_size
  steps_per_eval = ds_info.splits['test'].num_examples // batch_size

  if FLAGS.use_bfloat16:
    policy = tf.keras.mixed_precision.experimental.Policy('mixed_bfloat16')
    tf.keras.mixed_precision.experimental.set_policy(policy)

  summary_writer = tf.summary.create_file_writer(
      os.path.join(FLAGS.output_dir, 'summaries'))

  with strategy.scope():
    logging.info('Building ResNet model')
    model = wide_resnet(
        input_shape=ds_info.features['image'].shape,
        depth=28,
        width_multiplier=10,
        num_classes=ds_info.features['label'].num_classes,
        l2=FLAGS.l2,
        version=2)
    logging.info('Model input shape: %s', model.input_shape)
    logging.info('Model output shape: %s', model.output_shape)
    logging.info('Model number of weights: %s', model.count_params())
    # Linearly scale the learning rate and the decay epochs by the vanilla
    # settings.
    base_lr = FLAGS.base_learning_rate * batch_size / 128
    lr_decay_epochs = [(start_epoch * FLAGS.train_epochs) // 200
                       for start_epoch in FLAGS.lr_decay_epochs]
    lr_schedule = utils.LearningRateSchedule(
        steps_per_epoch,
        base_lr,
        decay_ratio=FLAGS.lr_decay_ratio,
        decay_epochs=lr_decay_epochs,
        warmup_epochs=FLAGS.lr_warmup_epochs)
    optimizer = tf.keras.optimizers.SGD(
        lr_schedule, momentum=0.9, nesterov=True)
    metrics = {
        'train/negative_log_likelihood': tf.keras.metrics.Mean(),
        'train/accuracy': tf.keras.metrics.SparseCategoricalAccuracy(),
        'train/loss': tf.keras.metrics.Mean(),
        'test/negative_log_likelihood': tf.keras.metrics.Mean(),
        'test/accuracy': tf.keras.metrics.SparseCategoricalAccuracy(),
    }

    checkpoint = tf.train.Checkpoint(model=model, optimizer=optimizer)
    latest_checkpoint = tf.train.latest_checkpoint(FLAGS.output_dir)
    initial_epoch = 0
    if latest_checkpoint:
      # checkpoint.restore must be within a strategy.scope() so that optimizer
      # slot variables are mirrored.
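      # Explanatory note (added comment): restoring the checkpoint also
      # restores optimizer.iterations, so the integer division below recovers
      # how many full epochs had completed and the loop resumes from there.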
      checkpoint.restore(latest_checkpoint)
      logging.info('Loaded checkpoint %s', latest_checkpoint)
      initial_epoch = optimizer.iterations.numpy() // steps_per_epoch

  @tf.function
  def train_step(iterator):
    """Training StepFn."""

    def step_fn(inputs):
      """Per-Replica StepFn."""
      images, labels = inputs
      with tf.GradientTape() as tape:
        logits = model(images, training=True)
        if FLAGS.use_bfloat16:
          logits = tf.cast(logits, tf.float32)
        negative_log_likelihood = tf.reduce_mean(
            tf.keras.losses.sparse_categorical_crossentropy(
                labels, logits, from_logits=True))
        l2_loss = sum(model.losses)
        loss = negative_log_likelihood + l2_loss
        # Scale the loss, given that TPUStrategy will reduce-sum the
        # gradients across replicas.
        scaled_loss = loss / strategy.num_replicas_in_sync

      grads = tape.gradient(scaled_loss, model.trainable_variables)
      optimizer.apply_gradients(zip(grads, model.trainable_variables))

      metrics['train/loss'].update_state(loss)
      metrics['train/negative_log_likelihood'].update_state(
          negative_log_likelihood)
      metrics['train/accuracy'].update_state(labels, logits)

    strategy.experimental_run_v2(step_fn, args=(next(iterator),))

  @tf.function
  def test_step(iterator):
    """Evaluation StepFn."""

    def step_fn(inputs):
      """Per-Replica StepFn."""
      images, labels = inputs
      logits = model(images, training=False)
      if FLAGS.use_bfloat16:
        logits = tf.cast(logits, tf.float32)
      probs = tf.nn.softmax(logits)
      # `probs` is already normalized, so the default from_logits=False
      # applies here.
      negative_log_likelihood = tf.reduce_mean(
          tf.keras.losses.sparse_categorical_crossentropy(labels, probs))
      metrics['test/negative_log_likelihood'].update_state(
          negative_log_likelihood)
      metrics['test/accuracy'].update_state(labels, probs)

    strategy.experimental_run_v2(step_fn, args=(next(iterator),))

  train_iterator = iter(train_dataset)
  start_time = time.time()
  for epoch in range(initial_epoch, FLAGS.train_epochs):
    logging.info('Starting to run epoch: %s', epoch)
    for step in range(steps_per_epoch):
      train_step(train_iterator)

      current_step = epoch * steps_per_epoch + (step + 1)
      max_steps = steps_per_epoch * FLAGS.train_epochs
      time_elapsed = time.time() - start_time
      steps_per_sec = float(current_step) / time_elapsed
      eta_seconds = (max_steps - current_step) / steps_per_sec
      message = ('{:.1%} completion: epoch {:d}/{:d}. {:.1f} steps/s. '
                 'ETA: {:.0f} min. Time elapsed: {:.0f} min'.format(
                     current_step / max_steps, epoch + 1, FLAGS.train_epochs,
                     steps_per_sec, eta_seconds / 60, time_elapsed / 60))
      if step % 20 == 0:
        logging.info(message)

    test_iterator = iter(test_dataset)
    for step in range(steps_per_eval):
      if step % 20 == 0:
        logging.info('Starting to run eval step %s of epoch: %s', step, epoch)
      test_step(test_iterator)

    logging.info('Train Loss: %.4f, Accuracy: %.2f%%',
                 metrics['train/loss'].result(),
                 metrics['train/accuracy'].result() * 100)
    logging.info('Test NLL: %.4f, Accuracy: %.2f%%',
                 metrics['test/negative_log_likelihood'].result(),
                 metrics['test/accuracy'].result() * 100)

    with summary_writer.as_default():
      for name, metric in metrics.items():
        tf.summary.scalar(name, metric.result(), step=epoch + 1)

    for metric in metrics.values():
      metric.reset_states()

    if (epoch + 1) % 20 == 0:
      checkpoint_name = checkpoint.save(
          os.path.join(FLAGS.output_dir, 'checkpoint'))
      logging.info('Saved checkpoint to %s', checkpoint_name)