def generate_tables_definition(self):
    """Builds a flat PyTables column description for this stimulus."""
    import tables
    stim_template_dict = {'odor': tables.StringCol(32),
                          'vialconc': tables.Float64Col()}
    # One flow column per mass flow controller.
    for i in range(len(self.mfcs)):
        k = 'mfc_{0}_flow'.format(i)
        stim_template_dict[k] = tables.Float64Col()
    # Nest each dilutor's own definition; the dict is flattened below.
    if self.dilutors:
        stim_template_dict['dilutors'] = dict()
        for i in range(len(self.dilutors)):
            k = 'dilutor_{0}'.format(i)
            dilutor = self.dilutors[i]
            stim_template_dict['dilutors'][k] = dilutor.generate_tables_definition()
    return flatten_dictionary(stim_template_dict)
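# A minimal sketch of the `flatten_dictionary` helper used above, assuming it
# joins nested keys with an underscore so that PyTables receives a single flat
# column description (e.g. {'dilutors': {'dilutor_0': {...}}} becomes
# 'dilutors_dilutor_0_...'). The joining convention is an assumption; the
# actual helper may differ.
def flatten_dictionary(nested, parent_key='', sep='_'):
    """Recursively flattens nested dicts into a single-level dict."""
    flat = {}
    for key, value in nested.items():
        new_key = parent_key + sep + key if parent_key else key
        if isinstance(value, dict):
            flat.update(flatten_dictionary(value, new_key, sep=sep))
        else:
            flat[new_key] = value
    return flat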
def main(argv):
  del argv  # unused arg
  if not FLAGS.use_gpu:
    raise ValueError('Only GPU is currently supported.')
  if FLAGS.num_cores > 1:
    raise ValueError('Only a single accelerator is currently supported.')
  tf.random.set_seed(FLAGS.seed)
  tf.io.gfile.makedirs(FLAGS.output_dir)

  ds_info = tfds.builder(FLAGS.dataset).info
  batch_size = FLAGS.per_core_batch_size * FLAGS.num_cores
  steps_per_eval = ds_info.splits['test'].num_examples // batch_size
  num_classes = ds_info.features['label'].num_classes

  data_dir = FLAGS.data_dir
  dataset = ub.datasets.get(
      FLAGS.dataset,
      data_dir=data_dir,
      download_data=FLAGS.download_data,
      split=tfds.Split.TEST).load(batch_size=batch_size)
  validation_percent = 1. - FLAGS.train_proportion
  val_dataset = ub.datasets.get(
      dataset_name=FLAGS.dataset,
      data_dir=data_dir,
      download_data=FLAGS.download_data,
      split=tfds.Split.VALIDATION,
      validation_percent=validation_percent,
      drop_remainder=False).load(batch_size=batch_size)
  steps_per_val_eval = int(ds_info.splits['train'].num_examples *
                           validation_percent) // batch_size

  test_datasets = {'clean': dataset}
  if FLAGS.dataset == 'cifar100':
    data_dir = FLAGS.cifar100_c_path
  corruption_types, _ = utils.load_corrupted_test_info(FLAGS.dataset)
  for corruption_type in corruption_types:
    for severity in range(1, 6):
      dataset = ub.datasets.get(
          f'{FLAGS.dataset}_corrupted',
          corruption_type=corruption_type,
          data_dir=data_dir,
          severity=severity,
          split=tfds.Split.TEST).load(batch_size=batch_size)
      test_datasets[f'{corruption_type}_{severity}'] = dataset

  model = ub.models.wide_resnet(
      input_shape=ds_info.features['image'].shape,
      depth=28,
      width_multiplier=10,
      num_classes=num_classes,
      l2=0.)
  logging.info('Model input shape: %s', model.input_shape)
  logging.info('Model output shape: %s', model.output_shape)
  logging.info('Model number of weights: %s', model.count_params())

  # Search for checkpoints.
  ensemble_filenames = parse_checkpoint_dir(FLAGS.checkpoint_dir)
  model_pool_size = len(ensemble_filenames)
  logging.info('Model pool size: %s', model_pool_size)
  logging.info('Ensemble size: %s', FLAGS.ensemble_size)
  logging.info('Ensemble number of weights: %s',
               FLAGS.ensemble_size * model.count_params())
  logging.info('Ensemble filenames: %s', str(ensemble_filenames))
  checkpoint = tf.train.Checkpoint(model=model)

  # Compute the logits on the validation set.
  val_logits, val_labels = [], []
  for m, ensemble_filename in enumerate(ensemble_filenames):
    # Enforce memory clean-up.
    tf.keras.backend.clear_session()
    checkpoint.restore(ensemble_filename)
    val_iterator = iter(val_dataset)
    val_logits_m = []
    for _ in range(steps_per_val_eval):
      inputs = next(val_iterator)
      features = inputs['features']
      labels = inputs['labels']
      val_logits_m.append(model(features, training=False))
      if m == 0:
        val_labels.append(labels)

    val_logits.append(tf.concat(val_logits_m, axis=0))
    if m == 0:
      val_labels = tf.concat(val_labels, axis=0)

    percent = (m + 1.) / model_pool_size
    message = ('{:.1%} completion for prediction on validation set: '
               'model {:d}/{:d}.'.format(percent, m + 1, model_pool_size))
    logging.info(message)

  selected_members, val_acc, val_nll = greedy_selection(val_logits, val_labels,
                                                        FLAGS.ensemble_size,
                                                        FLAGS.greedy_objective)
  unique_selected_members = list(set(selected_members))
  message = ('Members selected by greedy procedure: {} (with {} unique '
             'member(s))\n\t{}').format(
                 selected_members, len(unique_selected_members),
                 [ensemble_filenames[i] for i in selected_members])
  logging.info(message)

  val_metrics = {
      'val/accuracy': tf.keras.metrics.Mean(),
      'val/negative_log_likelihood': tf.keras.metrics.Mean()
  }
  val_metrics['val/accuracy'].update_state(val_acc)
  val_metrics['val/negative_log_likelihood'].update_state(val_nll)

  # Write model predictions to files.
  num_datasets = len(test_datasets)
  for m, member_id in enumerate(unique_selected_members):
    ensemble_filename = ensemble_filenames[member_id]
    checkpoint.restore(ensemble_filename)
    for n, (name, test_dataset) in enumerate(test_datasets.items()):
      filename = '{dataset}_{member}.npy'.format(dataset=name, member=member_id)
      filename = os.path.join(FLAGS.output_dir, filename)
      if not tf.io.gfile.exists(filename):
        logits = []
        test_iterator = iter(test_dataset)
        for _ in range(steps_per_eval):
          features = next(test_iterator)['features']  # pytype: disable=unsupported-operands
          logits.append(model(features, training=False))

        logits = tf.concat(logits, axis=0)
        with tf.io.gfile.GFile(filename, 'w') as f:
          np.save(f, logits.numpy())
      numerator = m * num_datasets + (n + 1)
      denominator = len(unique_selected_members) * num_datasets
      percent = numerator / denominator
      message = ('{:.1%} completion for prediction: ensemble member {:d}/{:d}. '
                 'Dataset {:d}/{:d}'.format(percent, m + 1,
                                            len(unique_selected_members),
                                            n + 1, num_datasets))
      logging.info(message)

  metrics = {
      'test/negative_log_likelihood': tf.keras.metrics.Mean(),
      'test/gibbs_cross_entropy': tf.keras.metrics.Mean(),
      'test/accuracy': tf.keras.metrics.SparseCategoricalAccuracy(),
      'test/ece': rm.metrics.ExpectedCalibrationError(num_bins=FLAGS.num_bins),
      'test/diversity': rm.metrics.AveragePairwiseDiversity(),
  }
  metrics.update(val_metrics)
  corrupt_metrics = {}
  for name in test_datasets:
    corrupt_metrics['test/nll_{}'.format(name)] = tf.keras.metrics.Mean()
    corrupt_metrics['test/accuracy_{}'.format(name)] = (
        tf.keras.metrics.SparseCategoricalAccuracy())
    corrupt_metrics['test/ece_{}'.format(name)] = (
        rm.metrics.ExpectedCalibrationError(num_bins=FLAGS.num_bins))
  for i in range(len(unique_selected_members)):
    metrics['test/nll_member_{}'.format(i)] = tf.keras.metrics.Mean()
    metrics['test/accuracy_member_{}'.format(i)] = (
        tf.keras.metrics.SparseCategoricalAccuracy())

  # Evaluate model predictions.
  for n, (name, test_dataset) in enumerate(test_datasets.items()):
    logits_dataset = []
    for member_id in selected_members:
      filename = '{dataset}_{member}.npy'.format(dataset=name, member=member_id)
      filename = os.path.join(FLAGS.output_dir, filename)
      with tf.io.gfile.GFile(filename, 'rb') as f:
        logits_dataset.append(np.load(f))

    logits_dataset = tf.convert_to_tensor(logits_dataset)
    test_iterator = iter(test_dataset)
    for step in range(steps_per_eval):
      labels = next(test_iterator)['labels']  # pytype: disable=unsupported-operands
      logits = logits_dataset[:, (step * batch_size):((step + 1) * batch_size)]
      labels = tf.cast(labels, tf.int32)
      negative_log_likelihood_metric = rm.metrics.EnsembleCrossEntropy()
      negative_log_likelihood_metric.add_batch(logits, labels=labels)
      negative_log_likelihood = list(
          negative_log_likelihood_metric.result().values())[0]
      per_probs = tf.nn.softmax(logits)
      probs = tf.reduce_mean(per_probs, axis=0)
      if name == 'clean':
        gibbs_ce_metric = rm.metrics.GibbsCrossEntropy()
        gibbs_ce_metric.add_batch(logits, labels=labels)
        gibbs_ce = list(gibbs_ce_metric.result().values())[0]
        metrics['test/negative_log_likelihood'].update_state(
            negative_log_likelihood)
        metrics['test/gibbs_cross_entropy'].update_state(gibbs_ce)
        metrics['test/accuracy'].update_state(labels, probs)
        metrics['test/ece'].add_batch(probs, label=labels)

        # Attention must be paid to deal with duplicated members, e.g.:
        #   selected_members = [2, 7, 3, 3]
        #   unique_selected_members = [2, 3, 7]
        #   selected_members.index(3) --> 2
        for member_id in unique_selected_members:
          i = selected_members.index(member_id)
          member_probs = per_probs[i]
          member_loss = tf.keras.losses.sparse_categorical_crossentropy(
              labels, member_probs)
          metrics['test/nll_member_{}'.format(i)].update_state(member_loss)
          metrics['test/accuracy_member_{}'.format(i)].update_state(
              labels, member_probs)
        metrics['test/diversity'].add_batch(per_probs)
      else:
        corrupt_metrics['test/nll_{}'.format(name)].update_state(
            negative_log_likelihood)
        corrupt_metrics['test/accuracy_{}'.format(name)].update_state(
            labels, probs)
        corrupt_metrics['test/ece_{}'.format(name)].add_batch(
            probs, label=labels)

    message = ('{:.1%} completion for evaluation: dataset {:d}/{:d}'.format(
        (n + 1) / num_datasets, n + 1, num_datasets))
    logging.info(message)

  corrupt_results = utils.aggregate_corrupt_metrics(corrupt_metrics,
                                                    corruption_types)
  total_results = {name: metric.result() for name, metric in metrics.items()}
  total_results.update(corrupt_results)
  # Results from Robustness Metrics themselves return a dict, so flatten them.
  total_results = utils.flatten_dictionary(total_results)
  logging.info('Metrics: %s', total_results)
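# `greedy_selection` is referenced above but defined elsewhere in the repo.
# A minimal sketch of the standard greedy ensemble construction (selection is
# with replacement, which is why `selected_members` may contain duplicates),
# assuming the objective is the validation NLL; the actual implementation and
# its `greedy_objective` options may differ:
import numpy as np
import tensorflow as tf

def greedy_selection(val_logits, val_labels, ensemble_size, objective='nll'):
  """Greedily grows an ensemble minimizing NLL on the validation set."""
  assert objective == 'nll', 'This sketch only implements the NLL objective.'
  labels = tf.cast(val_labels, tf.int32)

  def ensemble_nll(member_ids):
    probs = tf.reduce_mean(
        [tf.nn.softmax(val_logits[i]) for i in member_ids], axis=0)
    nll = tf.keras.losses.sparse_categorical_crossentropy(labels, probs)
    return float(tf.reduce_mean(nll))

  selected = []
  for _ in range(ensemble_size):
    # With replacement: an already-selected member may be picked again.
    scores = [ensemble_nll(selected + [i]) for i in range(len(val_logits))]
    selected.append(int(np.argmin(scores)))

  probs = tf.reduce_mean(
      [tf.nn.softmax(val_logits[i]) for i in selected], axis=0)
  predictions = tf.cast(tf.argmax(probs, axis=-1), tf.int32)
  val_acc = float(tf.reduce_mean(
      tf.cast(tf.equal(predictions, labels), tf.float32)))
  return selected, val_acc, ensemble_nll(selected)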
def main(argv):
  del argv  # unused arg
  tf.io.gfile.makedirs(FLAGS.output_dir)
  logging.info('Saving checkpoints at %s', FLAGS.output_dir)
  tf.random.set_seed(FLAGS.seed)

  train_batch_size = (FLAGS.per_core_batch_size * FLAGS.num_cores //
                      FLAGS.batch_repetitions)
  test_batch_size = FLAGS.per_core_batch_size * FLAGS.num_cores
  steps_per_epoch = APPROX_IMAGENET_TRAIN_IMAGES // train_batch_size
  steps_per_eval = IMAGENET_VALIDATION_IMAGES // test_batch_size
  data_dir = FLAGS.data_dir

  if FLAGS.use_gpu:
    logging.info('Use GPU')
    strategy = tf.distribute.MirroredStrategy()
  else:
    logging.info('Use TPU at %s',
                 FLAGS.tpu if FLAGS.tpu is not None else 'local')
    resolver = tf.distribute.cluster_resolver.TPUClusterResolver(tpu=FLAGS.tpu)
    tf.config.experimental_connect_to_cluster(resolver)
    tf.tpu.experimental.initialize_tpu_system(resolver)
    strategy = tf.distribute.TPUStrategy(resolver)

  train_builder = ub.datasets.ImageNetDataset(
      split=tfds.Split.TRAIN,
      use_bfloat16=FLAGS.use_bfloat16,
      data_dir=data_dir)
  train_dataset = train_builder.load(batch_size=train_batch_size,
                                     strategy=strategy)
  test_builder = ub.datasets.ImageNetDataset(
      split=tfds.Split.TEST,
      use_bfloat16=FLAGS.use_bfloat16,
      data_dir=data_dir)
  test_dataset = test_builder.load(batch_size=test_batch_size,
                                   strategy=strategy)

  if FLAGS.use_bfloat16:
    tf.keras.mixed_precision.set_global_policy('mixed_bfloat16')

  with strategy.scope():
    logging.info('Building Keras ResNet-50 model')
    model = ub.models.resnet50_mimo(
        input_shape=(FLAGS.ensemble_size, 224, 224, 3),
        num_classes=NUM_CLASSES,
        ensemble_size=FLAGS.ensemble_size,
        width_multiplier=FLAGS.width_multiplier)
    logging.info('Model input shape: %s', model.input_shape)
    logging.info('Model output shape: %s', model.output_shape)
    logging.info('Model number of weights: %s', model.count_params())
    # Scale learning rate and decay epochs by vanilla settings.
    base_lr = FLAGS.base_learning_rate * train_batch_size / 256
    decay_epochs = [
        (FLAGS.train_epochs * 30) // 90,
        (FLAGS.train_epochs * 60) // 90,
        (FLAGS.train_epochs * 80) // 90,
    ]
    learning_rate = ub.schedules.WarmUpPiecewiseConstantSchedule(
        steps_per_epoch=steps_per_epoch,
        base_learning_rate=base_lr,
        decay_ratio=0.1,
        decay_epochs=decay_epochs,
        warmup_epochs=5)
    optimizer = tf.keras.optimizers.SGD(learning_rate=learning_rate,
                                        momentum=1.0 - FLAGS.one_minus_momentum,
                                        nesterov=True)
    metrics = {
        'train/negative_log_likelihood': tf.keras.metrics.Mean(),
        'train/accuracy': tf.keras.metrics.SparseCategoricalAccuracy(),
        'train/loss': tf.keras.metrics.Mean(),
        'train/ece': rm.metrics.ExpectedCalibrationError(
            num_bins=FLAGS.num_bins),
        'test/negative_log_likelihood': tf.keras.metrics.Mean(),
        'test/accuracy': tf.keras.metrics.SparseCategoricalAccuracy(),
        'test/ece': rm.metrics.ExpectedCalibrationError(
            num_bins=FLAGS.num_bins),
        'test/diversity': rm.metrics.AveragePairwiseDiversity(),
    }
    for i in range(FLAGS.ensemble_size):
      metrics['test/nll_member_{}'.format(i)] = tf.keras.metrics.Mean()
      metrics['test/accuracy_member_{}'.format(i)] = (
          tf.keras.metrics.SparseCategoricalAccuracy())
    logging.info('Finished building Keras ResNet-50 model')

    checkpoint = tf.train.Checkpoint(model=model, optimizer=optimizer)
    latest_checkpoint = tf.train.latest_checkpoint(FLAGS.output_dir)
    initial_epoch = 0
    if latest_checkpoint:
      # checkpoint.restore must be within a strategy.scope() so that optimizer
      # slot variables are mirrored.
      checkpoint.restore(latest_checkpoint)
      logging.info('Loaded checkpoint %s', latest_checkpoint)
      initial_epoch = optimizer.iterations.numpy() // steps_per_epoch

  summary_writer = tf.summary.create_file_writer(
      os.path.join(FLAGS.output_dir, 'summaries'))

  @tf.function
  def train_step(iterator):
    """Training StepFn."""
    def step_fn(inputs):
      """Per-Replica StepFn."""
      images = inputs['features']
      labels = inputs['labels']
      batch_size = tf.shape(images)[0]

      main_shuffle = tf.random.shuffle(
          tf.tile(tf.range(batch_size), [FLAGS.batch_repetitions]))
      to_shuffle = tf.cast(
          tf.cast(tf.shape(main_shuffle)[0], tf.float32) *
          (1. - FLAGS.input_repetition_probability), tf.int32)
      shuffle_indices = [
          tf.concat([
              tf.random.shuffle(main_shuffle[:to_shuffle]),
              main_shuffle[to_shuffle:]
          ], axis=0) for _ in range(FLAGS.ensemble_size)
      ]
      images = tf.stack(
          [tf.gather(images, indices, axis=0) for indices in shuffle_indices],
          axis=1)
      labels = tf.stack(
          [tf.gather(labels, indices, axis=0) for indices in shuffle_indices],
          axis=1)

      with tf.GradientTape() as tape:
        logits = model(images, training=True)
        if FLAGS.use_bfloat16:
          logits = tf.cast(logits, tf.float32)

        negative_log_likelihood = tf.reduce_mean(
            tf.reduce_sum(
                tf.keras.losses.sparse_categorical_crossentropy(
                    labels, logits, from_logits=True),
                axis=1))
        filtered_variables = []
        for var in model.trainable_variables:
          # Apply l2 on the conv/dense kernels and biases; BN parameters are
          # excluded. Note the reliance on the variable naming scheme.
          if 'kernel' in var.name or 'bias' in var.name:
            filtered_variables.append(tf.reshape(var, (-1,)))

        l2_loss = FLAGS.l2 * 2 * tf.nn.l2_loss(
            tf.concat(filtered_variables, axis=0))
        # Scale the loss given the TPUStrategy will reduce sum all gradients.
        loss = negative_log_likelihood + l2_loss
        scaled_loss = loss / strategy.num_replicas_in_sync

      grads = tape.gradient(scaled_loss, model.trainable_variables)
      optimizer.apply_gradients(zip(grads, model.trainable_variables))

      probs = tf.nn.softmax(tf.reshape(logits, [-1, NUM_CLASSES]))
      flat_labels = tf.reshape(labels, [-1])
      metrics['train/ece'].add_batch(probs, label=flat_labels)
      metrics['train/loss'].update_state(loss)
      metrics['train/negative_log_likelihood'].update_state(
          negative_log_likelihood)
      metrics['train/accuracy'].update_state(flat_labels, probs)

    for _ in tf.range(tf.cast(steps_per_epoch, tf.int32)):
      strategy.run(step_fn, args=(next(iterator),))

  @tf.function
  def test_step(iterator):
    """Evaluation StepFn."""
    def step_fn(inputs):
      """Per-Replica StepFn."""
      images = inputs['features']
      labels = inputs['labels']
      images = tf.tile(tf.expand_dims(images, 1),
                       [1, FLAGS.ensemble_size, 1, 1, 1])
      logits = model(images, training=False)
      if FLAGS.use_bfloat16:
        logits = tf.cast(logits, tf.float32)
      probs = tf.nn.softmax(logits)

      per_probs = tf.transpose(probs, perm=[1, 0, 2])
      metrics['test/diversity'].add_batch(per_probs)

      for i in range(FLAGS.ensemble_size):
        member_probs = probs[:, i]
        member_loss = tf.keras.losses.sparse_categorical_crossentropy(
            labels, member_probs)
        metrics['test/nll_member_{}'.format(i)].update_state(member_loss)
        metrics['test/accuracy_member_{}'.format(i)].update_state(
            labels, member_probs)

      # Negative log marginal likelihood computed in a numerically-stable way.
      labels_tiled = tf.tile(tf.expand_dims(labels, 1),
                             [1, FLAGS.ensemble_size])
      log_likelihoods = -tf.keras.losses.sparse_categorical_crossentropy(
          labels_tiled, logits, from_logits=True)
      negative_log_likelihood = tf.reduce_mean(
          -tf.reduce_logsumexp(log_likelihoods, axis=[1]) +
          tf.math.log(float(FLAGS.ensemble_size)))
      probs = tf.math.reduce_mean(probs, axis=1)  # marginalize

      metrics['test/negative_log_likelihood'].update_state(
          negative_log_likelihood)
      metrics['test/accuracy'].update_state(labels, probs)
      metrics['test/ece'].add_batch(probs, label=labels)

    for _ in tf.range(tf.cast(steps_per_eval, tf.int32)):
      strategy.run(step_fn, args=(next(iterator),))

  metrics.update({'test/ms_per_example': tf.keras.metrics.Mean()})

  train_iterator = iter(train_dataset)
  start_time = time.time()
  for epoch in range(initial_epoch, FLAGS.train_epochs):
    logging.info('Starting to run epoch: %s', epoch)
    train_step(train_iterator)

    current_step = (epoch + 1) * steps_per_epoch
    max_steps = steps_per_epoch * FLAGS.train_epochs
    time_elapsed = time.time() - start_time
    steps_per_sec = float(current_step) / time_elapsed
    eta_seconds = (max_steps - current_step) / steps_per_sec
    message = ('{:.1%} completion: epoch {:d}/{:d}. {:.1f} steps/s. '
               'ETA: {:.0f} min. Time elapsed: {:.0f} min'.format(
                   current_step / max_steps, epoch + 1, FLAGS.train_epochs,
                   steps_per_sec, eta_seconds / 60, time_elapsed / 60))
    logging.info(message)

    test_iterator = iter(test_dataset)
    logging.info('Starting to run eval of epoch: %s', epoch)
    test_start_time = time.time()
    test_step(test_iterator)
    ms_per_example = (time.time() - test_start_time) * 1e6 / test_batch_size
    metrics['test/ms_per_example'].update_state(ms_per_example)

    logging.info('Train Loss: %.4f, Accuracy: %.2f%%',
                 metrics['train/loss'].result(),
                 metrics['train/accuracy'].result() * 100)
    logging.info('Test NLL: %.4f, Accuracy: %.2f%%',
                 metrics['test/negative_log_likelihood'].result(),
                 metrics['test/accuracy'].result() * 100)
    for i in range(FLAGS.ensemble_size):
      logging.info('Member %d Test Loss: %.4f, Accuracy: %.2f%%',
                   i, metrics['test/nll_member_{}'.format(i)].result(),
                   metrics['test/accuracy_member_{}'.format(i)].result() * 100)

    total_results = {name: metric.result() for name, metric in metrics.items()}
    # Results from Robustness Metrics themselves return a dict, so flatten them.
    total_results = utils.flatten_dictionary(total_results)
    with summary_writer.as_default():
      for name, result in total_results.items():
        tf.summary.scalar(name, result, step=epoch + 1)

    for _, metric in metrics.items():
      metric.reset_states()

    if (FLAGS.checkpoint_interval > 0 and
        (epoch + 1) % FLAGS.checkpoint_interval == 0):
      checkpoint_name = checkpoint.save(
          os.path.join(FLAGS.output_dir, 'checkpoint'))
      logging.info('Saved checkpoint to %s', checkpoint_name)

  final_save_name = os.path.join(FLAGS.output_dir, 'model')
  model.save(final_save_name)
  logging.info('Saved model to %s', final_save_name)

  with summary_writer.as_default():
    hp.hparams({
        'base_learning_rate': FLAGS.base_learning_rate,
        'one_minus_momentum': FLAGS.one_minus_momentum,
        'l2': FLAGS.l2,
        'batch_repetitions': FLAGS.batch_repetitions,
    })
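# The "numerically-stable" comment above refers to computing the ensemble NLL
# as log-mean-exp of per-member log-likelihoods rather than averaging
# probabilities and taking -log afterwards. A tiny self-contained check of the
# identity (illustrative only; shapes and sizes are hypothetical):
import numpy as np
import tensorflow as tf

ens_size = 3
logits = tf.random.normal([4, ens_size, 10])  # [batch, ensemble, classes]
labels = tf.constant([0, 1, 2, 3])
labels_tiled = tf.tile(tf.expand_dims(labels, 1), [1, ens_size])
log_likelihoods = -tf.keras.losses.sparse_categorical_crossentropy(
    labels_tiled, logits, from_logits=True)
stable_nll = tf.reduce_mean(
    -tf.reduce_logsumexp(log_likelihoods, axis=[1]) +
    tf.math.log(float(ens_size)))
# Naive equivalent: average the member probabilities, then take -log.
marginal_probs = tf.reduce_mean(tf.nn.softmax(logits), axis=1)
naive_nll = tf.reduce_mean(-tf.math.log(
    tf.gather(marginal_probs, labels, axis=1, batch_dims=1)))
np.testing.assert_allclose(stable_nll.numpy(), naive_nll.numpy(), rtol=1e-5)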
def main(argv):
  del argv  # unused arg
  if not FLAGS.use_gpu:
    raise ValueError('Only GPU is currently supported.')
  if FLAGS.num_cores > 1:
    raise ValueError('Only a single accelerator is currently supported.')
  tf.random.set_seed(FLAGS.seed)
  tf.io.gfile.makedirs(FLAGS.output_dir)

  ds_info = tfds.builder(FLAGS.dataset).info
  batch_size = FLAGS.per_core_batch_size * FLAGS.num_cores
  steps_per_eval = ds_info.splits['test'].num_examples // batch_size
  num_classes = ds_info.features['label'].num_classes

  data_dir = FLAGS.data_dir
  dataset = ub.datasets.get(
      FLAGS.dataset,
      download_data=FLAGS.download_data,
      data_dir=data_dir,
      split=tfds.Split.TEST).load(batch_size=batch_size)
  test_datasets = {'clean': dataset}
  if FLAGS.dataset == 'cifar100':
    data_dir = FLAGS.cifar100_c_path
  corruption_types, _ = utils.load_corrupted_test_info(FLAGS.dataset)
  for corruption_type in corruption_types:
    for severity in range(1, 6):
      dataset = ub.datasets.get(
          f'{FLAGS.dataset}_corrupted',
          corruption_type=corruption_type,
          data_dir=data_dir,
          download_data=FLAGS.download_data,
          severity=severity,
          split=tfds.Split.TEST).load(batch_size=batch_size)
      test_datasets[f'{corruption_type}_{severity}'] = dataset

  model = ub.models.wide_resnet_heteroscedastic(
      input_shape=ds_info.features['image'].shape,
      depth=28,
      width_multiplier=10,
      num_classes=num_classes,
      l2=0.,
      version=2,
      temperature=FLAGS.temperature,
      num_factors=FLAGS.num_factors,
      num_mc_samples=FLAGS.num_mc_samples)
  logging.info('Model input shape: %s', model.input_shape)
  logging.info('Model output shape: %s', model.output_shape)
  logging.info('Model number of weights: %s', model.count_params())

  # Search for checkpoints from their index file; then remove the index suffix.
  ensemble_filenames = parse_checkpoint_dir(FLAGS.checkpoint_dir)
  ensemble_size = len(ensemble_filenames)
  logging.info('Ensemble size: %s', ensemble_size)
  logging.info('Ensemble number of weights: %s',
               ensemble_size * model.count_params())
  logging.info('Ensemble filenames: %s', str(ensemble_filenames))
  checkpoint = tf.train.Checkpoint(model=model)

  # Write model predictions to files.
  num_datasets = len(test_datasets)
  for m, ensemble_filename in enumerate(ensemble_filenames):
    checkpoint.restore(ensemble_filename)
    for n, (name, test_dataset) in enumerate(test_datasets.items()):
      filename = '{dataset}_{member}.npy'.format(dataset=name, member=m)
      filename = os.path.join(FLAGS.output_dir, filename)
      if not tf.io.gfile.exists(filename):
        logits = []
        test_iterator = iter(test_dataset)
        for _ in range(steps_per_eval):
          features = next(test_iterator)['features']  # pytype: disable=unsupported-operands
          logits.append(model(features, training=False))

        logits = tf.concat(logits, axis=0)
        with tf.io.gfile.GFile(filename, 'w') as f:
          np.save(f, logits.numpy())
      percent = (m * num_datasets + (n + 1)) / (ensemble_size * num_datasets)
      message = ('{:.1%} completion for prediction: ensemble member {:d}/{:d}. '
                 'Dataset {:d}/{:d}'.format(percent, m + 1, ensemble_size,
                                            n + 1, num_datasets))
      logging.info(message)

  metrics = {
      'test/negative_log_likelihood': tf.keras.metrics.Mean(),
      'test/gibbs_cross_entropy': tf.keras.metrics.Mean(),
      'test/accuracy': tf.keras.metrics.SparseCategoricalAccuracy(),
      'test/ece': rm.metrics.ExpectedCalibrationError(num_bins=FLAGS.num_bins),
      'test/diversity': rm.metrics.AveragePairwiseDiversity(),
  }
  corrupt_metrics = {}
  for name in test_datasets:
    corrupt_metrics['test/nll_{}'.format(name)] = tf.keras.metrics.Mean()
    corrupt_metrics['test/accuracy_{}'.format(name)] = (
        tf.keras.metrics.SparseCategoricalAccuracy())
    corrupt_metrics['test/ece_{}'.format(name)] = (
        rm.metrics.ExpectedCalibrationError(num_bins=FLAGS.num_bins))
  for i in range(ensemble_size):
    metrics['test/nll_member_{}'.format(i)] = tf.keras.metrics.Mean()
    metrics['test/accuracy_member_{}'.format(i)] = (
        tf.keras.metrics.SparseCategoricalAccuracy())

  # Evaluate model predictions.
  for n, (name, test_dataset) in enumerate(test_datasets.items()):
    logits_dataset = []
    for m in range(ensemble_size):
      filename = '{dataset}_{member}.npy'.format(dataset=name, member=m)
      filename = os.path.join(FLAGS.output_dir, filename)
      with tf.io.gfile.GFile(filename, 'rb') as f:
        logits_dataset.append(np.load(f))

    logits_dataset = tf.convert_to_tensor(logits_dataset)
    test_iterator = iter(test_dataset)
    for step in range(steps_per_eval):
      labels = next(test_iterator)['labels']  # pytype: disable=unsupported-operands
      logits = logits_dataset[:, (step * batch_size):((step + 1) * batch_size)]
      labels = tf.cast(labels, tf.int32)
      negative_log_likelihood_metric = rm.metrics.EnsembleCrossEntropy()
      negative_log_likelihood_metric.add_batch(logits, labels=labels)
      negative_log_likelihood = list(
          negative_log_likelihood_metric.result().values())[0]
      per_probs = tf.nn.softmax(logits)
      probs = tf.reduce_mean(per_probs, axis=0)
      if name == 'clean':
        gibbs_ce_metric = rm.metrics.GibbsCrossEntropy()
        gibbs_ce_metric.add_batch(logits, labels=labels)
        gibbs_ce = list(gibbs_ce_metric.result().values())[0]
        metrics['test/negative_log_likelihood'].update_state(
            negative_log_likelihood)
        metrics['test/gibbs_cross_entropy'].update_state(gibbs_ce)
        metrics['test/accuracy'].update_state(labels, probs)
        metrics['test/ece'].add_batch(probs, label=labels)
        for i in range(ensemble_size):
          member_probs = per_probs[i]
          member_loss = tf.keras.losses.sparse_categorical_crossentropy(
              labels, member_probs)
          metrics['test/nll_member_{}'.format(i)].update_state(member_loss)
          metrics['test/accuracy_member_{}'.format(i)].update_state(
              labels, member_probs)
        metrics['test/diversity'].add_batch(per_probs)
      else:
        corrupt_metrics['test/nll_{}'.format(name)].update_state(
            negative_log_likelihood)
        corrupt_metrics['test/accuracy_{}'.format(name)].update_state(
            labels, probs)
        corrupt_metrics['test/ece_{}'.format(name)].add_batch(
            probs, label=labels)

    message = ('{:.1%} completion for evaluation: dataset {:d}/{:d}'.format(
        (n + 1) / num_datasets, n + 1, num_datasets))
    logging.info(message)

  corrupt_results = utils.aggregate_corrupt_metrics(corrupt_metrics,
                                                    corruption_types)
  total_results = {name: metric.result() for name, metric in metrics.items()}
  total_results.update(corrupt_results)
  # Results from Robustness Metrics themselves return a dict, so flatten them.
  total_results = utils.flatten_dictionary(total_results)
  logging.info('Metrics: %s', total_results)
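# EnsembleCrossEntropy and GibbsCrossEntropy above differ only in where the
# averaging over members happens: the ensemble CE mixes probabilities before
# the log, the Gibbs CE averages the per-member losses. A hedged NumPy
# illustration of the two definitions (the rm.metrics implementations may
# differ in details such as batching and reduction):
import numpy as np

def _softmax(x):
  e = np.exp(x - x.max(axis=-1, keepdims=True))
  return e / e.sum(axis=-1, keepdims=True)

member_logits = np.random.randn(3, 5, 10)  # [ensemble, batch, classes]
labels = np.random.randint(0, 10, size=5)
per_probs = _softmax(member_logits)
p_true = per_probs[:, np.arange(5), labels]        # [ensemble, batch]
ensemble_ce = -np.log(p_true.mean(axis=0)).mean()  # mix, then -log
gibbs_ce = -np.log(p_true).mean()                  # -log, then average
# By Jensen's inequality the ensemble CE is never worse than the Gibbs CE.
assert ensemble_ce <= gibbs_ce + 1e-12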
def main(argv):
  del argv  # unused arg
  tf.io.gfile.makedirs(FLAGS.output_dir)
  logging.info('Saving Deep Ensemble predictions to %s', FLAGS.output_dir)
  tf.random.set_seed(FLAGS.seed)

  if FLAGS.num_cores > 1:
    raise ValueError('Only a single accelerator is currently supported.')
  if FLAGS.use_gpu:
    logging.info('Use GPU')
  else:
    logging.info('Use CPU')
    os.environ['CUDA_VISIBLE_DEVICES'] = '-1'

  # As per the Kaggle challenge, we have split sizes:
  #   train: 35,126
  #   validation: 10,906 (currently unused)
  #   test: 42,670
  ds_info = tfds.builder('diabetic_retinopathy_detection').info
  eval_batch_size = FLAGS.eval_batch_size * FLAGS.num_cores
  steps_per_eval = ds_info.splits['test'].num_examples // eval_batch_size

  dataset_test_builder = ub.datasets.get(
      'diabetic_retinopathy_detection', split='test', data_dir=FLAGS.data_dir)
  dataset_test = dataset_test_builder.load(batch_size=eval_batch_size)

  if FLAGS.use_bfloat16:
    policy = tf.keras.mixed_precision.experimental.Policy('mixed_bfloat16')
    tf.keras.mixed_precision.experimental.set_policy(policy)

  # TODO(nband): debug, switch from keras.models.save to Checkpoint
  logging.info('Building Keras ResNet-50 Deep Ensemble model.')
  ensemble_filenames = utils.parse_keras_models(FLAGS.checkpoint_dir)
  ensemble_size = len(ensemble_filenames)
  logging.info('Ensemble size: %s', ensemble_size)
  logging.info('Ensemble Keras model dir names: %s', str(ensemble_filenames))

  # Write model predictions to files.
  for member, ensemble_filename in enumerate(ensemble_filenames):
    model = tf.keras.models.load_model(ensemble_filename, compile=False)
    logging.info('Model input shape: %s', model.input_shape)
    logging.info('Model output shape: %s', model.output_shape)
    logging.info('Model number of weights: %s', model.count_params())
    filename = f'{member}.npy'
    filename = os.path.join(FLAGS.output_dir, filename)
    if not tf.io.gfile.exists(filename):
      logits = []
      test_iterator = iter(dataset_test)
      for i in range(steps_per_eval):
        inputs = next(test_iterator)  # pytype: disable=attribute-error
        images = inputs['features']
        logits.append(model(images, training=False))
        if i % 100 == 0:
          logging.info('Ensemble member %d/%d: Completed %d of %d eval steps.',
                       member + 1, ensemble_size, i + 1, steps_per_eval)

      logits = tf.concat(logits, axis=0)
      with tf.io.gfile.GFile(filename, 'w') as f:
        np.save(f, logits.numpy())

    percent = (member + 1) / ensemble_size
    message = ('{:.1%} completion for prediction: ensemble member {:d}/{:d}.'
               .format(percent, member + 1, ensemble_size))
    logging.info(message)

  metrics = {
      'test/negative_log_likelihood': tf.keras.metrics.Mean(),
      'test/gibbs_cross_entropy': tf.keras.metrics.Mean(),
      'test/accuracy': tf.keras.metrics.BinaryAccuracy(),
      'test/auprc': tf.keras.metrics.AUC(curve='PR'),
      'test/auroc': tf.keras.metrics.AUC(curve='ROC'),
      'test/ece': rm.metrics.ExpectedCalibrationError(num_bins=FLAGS.num_bins),
      'test/diversity': rm.metrics.AveragePairwiseDiversity(),
  }
  for i in range(ensemble_size):
    metrics['test/nll_member_{}'.format(i)] = tf.keras.metrics.Mean()
    metrics['test/accuracy_member_{}'.format(i)] = (
        tf.keras.metrics.BinaryAccuracy())

  # Evaluate model predictions.
  logits_dataset = []
  for member in range(ensemble_size):
    filename = f'{member}.npy'
    filename = os.path.join(FLAGS.output_dir, filename)
    with tf.io.gfile.GFile(filename, 'rb') as f:
      logits_dataset.append(np.load(f))

  logits_dataset = tf.convert_to_tensor(logits_dataset)
  test_iterator = iter(dataset_test)
  for step in range(steps_per_eval):
    inputs = next(test_iterator)  # pytype: disable=attribute-error
    labels = inputs['labels']
    logits = logits_dataset[:, (step * eval_batch_size):((step + 1) *
                                                         eval_batch_size)]
    labels = tf.cast(labels, tf.float32)
    logits = tf.cast(logits, tf.float32)
    negative_log_likelihood_metric = rm.metrics.EnsembleCrossEntropy(
        binary=True)
    negative_log_likelihood_metric.add_batch(
        logits, labels=tf.expand_dims(labels, axis=-1))
    negative_log_likelihood = list(
        negative_log_likelihood_metric.result().values())[0]
    per_probs = tf.nn.sigmoid(logits)
    probs = tf.reduce_mean(per_probs, axis=0)
    gibbs_ce_metric = rm.metrics.GibbsCrossEntropy(binary=True)
    gibbs_ce_metric.add_batch(logits, labels=tf.expand_dims(labels, axis=-1))
    gibbs_ce = list(gibbs_ce_metric.result().values())[0]
    metrics['test/negative_log_likelihood'].update_state(
        negative_log_likelihood)
    metrics['test/gibbs_cross_entropy'].update_state(gibbs_ce)
    metrics['test/accuracy'].update_state(labels, probs)
    metrics['test/auprc'].update_state(labels, probs)
    metrics['test/auroc'].update_state(labels, probs)
    metrics['test/ece'].add_batch(probs, label=labels)
    metrics['test/diversity'].add_batch(per_probs)

    for i in range(ensemble_size):
      member_probs = per_probs[i]
      member_loss = tf.keras.losses.binary_crossentropy(labels, member_probs)
      metrics['test/nll_member_{}'.format(i)].update_state(member_loss)
      metrics['test/accuracy_member_{}'.format(i)].update_state(
          labels, member_probs)

  total_results = {name: metric.result() for name, metric in metrics.items()}
  # Results from Robustness Metrics themselves return a dict, so flatten them.
  total_results = utils.flatten_dictionary(total_results)
  logging.info('Metrics: %s', total_results)
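# `utils.parse_keras_models` is defined elsewhere in the repo. A plausible
# sketch under the assumption that each ensemble member was written with
# tf.keras.models.save_model into its own subdirectory of
# FLAGS.checkpoint_dir, identified by a saved_model.pb file; the actual
# directory layout and helper may differ:
import os
import tensorflow as tf

def parse_keras_models(checkpoint_dir):
  """Returns sorted directories of Keras SavedModels under checkpoint_dir."""
  paths = tf.io.gfile.glob(os.path.join(checkpoint_dir, '*', 'saved_model.pb'))
  return sorted(os.path.dirname(path) for path in paths)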
def main(argv):
  del argv  # unused arg
  tf.io.gfile.makedirs(FLAGS.output_dir)
  logging.info('Saving checkpoints at %s', FLAGS.output_dir)
  tf.random.set_seed(FLAGS.seed)

  data_dir = FLAGS.data_dir
  if FLAGS.use_gpu:
    logging.info('Use GPU')
    strategy = tf.distribute.MirroredStrategy()
  else:
    logging.info('Use TPU at %s',
                 FLAGS.tpu if FLAGS.tpu is not None else 'local')
    resolver = tf.distribute.cluster_resolver.TPUClusterResolver(tpu=FLAGS.tpu)
    tf.config.experimental_connect_to_cluster(resolver)
    tf.tpu.experimental.initialize_tpu_system(resolver)
    strategy = tf.distribute.TPUStrategy(resolver)

  per_core_batch_size = FLAGS.per_core_batch_size // FLAGS.ensemble_size
  batch_size = per_core_batch_size * FLAGS.num_cores
  check_bool = FLAGS.train_proportion > 0 and FLAGS.train_proportion <= 1
  assert check_bool, 'Proportion of train set has to meet 0 < prop <= 1.'

  drop_remainder_validation = True
  if not FLAGS.use_gpu:
    # This has to be True for TPU training; otherwise the batch size of images
    # in the validation set can't be determined by the TPU compiler.
    assert drop_remainder_validation, 'drop_remainder must be True in TPU mode.'

  validation_percent = 1 - FLAGS.train_proportion
  train_dataset = ub.datasets.get(
      FLAGS.dataset,
      data_dir=data_dir,
      download_data=FLAGS.download_data,
      split=tfds.Split.TRAIN,
      validation_percent=validation_percent).load(batch_size=batch_size)
  validation_dataset = ub.datasets.get(
      FLAGS.dataset,
      data_dir=data_dir,
      download_data=FLAGS.download_data,
      split=tfds.Split.VALIDATION,
      validation_percent=validation_percent,
      drop_remainder=drop_remainder_validation).load(batch_size=batch_size)
  validation_dataset = validation_dataset.repeat()
  clean_test_dataset = ub.datasets.get(
      FLAGS.dataset,
      data_dir=data_dir,
      download_data=FLAGS.download_data,
      split=tfds.Split.TEST).load(batch_size=batch_size)
  train_dataset = strategy.experimental_distribute_dataset(train_dataset)
  validation_dataset = strategy.experimental_distribute_dataset(
      validation_dataset)
  test_datasets = {
      'clean': strategy.experimental_distribute_dataset(clean_test_dataset),
  }
  if FLAGS.corruptions_interval > 0:
    if FLAGS.dataset == 'cifar100':
      data_dir = FLAGS.cifar100_c_path
    corruption_types, _ = utils.load_corrupted_test_info(FLAGS.dataset)
    for corruption_type in corruption_types:
      for severity in range(1, 6):
        dataset = ub.datasets.get(
            f'{FLAGS.dataset}_corrupted',
            corruption_type=corruption_type,
            data_dir=data_dir,
            severity=severity,
            split=tfds.Split.TEST).load(batch_size=batch_size)
        test_datasets[f'{corruption_type}_{severity}'] = (
            strategy.experimental_distribute_dataset(dataset))

  ds_info = tfds.builder(FLAGS.dataset).info
  train_sample_size = ds_info.splits[
      'train'].num_examples * FLAGS.train_proportion
  steps_per_epoch = int(train_sample_size / batch_size)
  train_sample_size = int(train_sample_size)
  steps_per_eval = ds_info.splits['test'].num_examples // batch_size
  num_classes = ds_info.features['label'].num_classes

  summary_writer = tf.summary.create_file_writer(
      os.path.join(FLAGS.output_dir, 'summaries'))

  logging.info('Building Keras model.')
  depth = 28
  width = 10

  dict_ranges = {'min': FLAGS.min_l2_range, 'max': FLAGS.max_l2_range}
  ranges = [dict_ranges for _ in range(6)]  # 6 independent l2 parameters
  model_config = {
      'key_to_index': {
          'input_conv_l2_kernel': 0,
          'group_l2_kernel': 1,
          'group_1_l2_kernel': 2,
          'group_2_l2_kernel': 3,
          'dense_l2_kernel': 4,
          'dense_l2_bias': 5,
      },
      'ranges': ranges,
      'test': None
  }
  lambdas_config = LambdaConfig(model_config['ranges'],
                                model_config['key_to_index'])

  if FLAGS.e_body_hidden_units > 0:
    e_body_arch = '({},)'.format(FLAGS.e_body_hidden_units)
  else:
    e_body_arch = '()'
  e_shared_arch = '()'
  e_activation = 'tanh'
  filters_resnet = [16]
  for i in range(0, 3):  # 3 groups of blocks
    filters_resnet.extend([16 * width * 2**i] * 9)  # 9 layers in each block
  # e_head dim for conv2d is just the number of filters (only kernel), and
  # twice the number of classes for the last dense layer (kernel + bias).
  e_head_dims = [x for x in filters_resnet] + [2 * num_classes]

  with strategy.scope():
    e_models = e_factory(
        lambdas_config.input_shape,
        e_head_dims=e_head_dims,
        e_body_arch=eval(e_body_arch),  # pylint: disable=eval-used
        e_shared_arch=eval(e_shared_arch),  # pylint: disable=eval-used
        activation=e_activation,
        use_bias=FLAGS.e_model_use_bias,
        e_head_init=FLAGS.init_emodels_stddev)

    model = wide_resnet_hyperbatchensemble(
        input_shape=ds_info.features['image'].shape,
        depth=depth,
        width_multiplier=width,
        num_classes=num_classes,
        ensemble_size=FLAGS.ensemble_size,
        random_sign_init=FLAGS.random_sign_init,
        config=lambdas_config,
        e_models=e_models,
        l2_batchnorm_layer=FLAGS.l2_batchnorm,
        regularize_fast_weights=FLAGS.regularize_fast_weights,
        fast_weights_eq_contraint=FLAGS.fast_weights_eq_contraint,
        version=2)
    logging.info('Model input shape: %s', model.input_shape)
    logging.info('Model output shape: %s', model.output_shape)
    logging.info('Model number of weights: %s', model.count_params())
    # Build hyper-batchensemble complete -------------------------

    # Initialize lambda distributions for tuning.
    lambdas_mean = tf.reduce_mean(
        log_uniform_mean([lambdas_config.log_min, lambdas_config.log_max]))
    lambdas0 = tf.random.normal((FLAGS.ensemble_size, lambdas_config.dim),
                                lambdas_mean,
                                0.1 * FLAGS.ens_init_delta_bounds)
    lower0 = lambdas0 - tf.constant(FLAGS.ens_init_delta_bounds)
    lower0 = tf.maximum(lower0, 1e-8)
    upper0 = lambdas0 + tf.constant(FLAGS.ens_init_delta_bounds)
    log_lower = tf.Variable(tf.math.log(lower0))
    log_upper = tf.Variable(tf.math.log(upper0))
    lambda_parameters = [log_lower, log_upper]  # these variables are tuned
    clip_lambda_parameters(lambda_parameters, lambdas_config)

    # Optimizer settings to train model weights.
    # Linearly scale learning rate and the decay epochs by vanilla settings.
    # Note: Here, we don't divide the epochs by 200 as for the other
    # uncertainty baselines.
    base_lr = FLAGS.base_learning_rate * batch_size / 128
    lr_decay_epochs = [int(l) for l in FLAGS.lr_decay_epochs]
    lr_schedule = ub.schedules.WarmUpPiecewiseConstantSchedule(
        steps_per_epoch,
        base_lr,
        decay_ratio=FLAGS.lr_decay_ratio,
        decay_epochs=lr_decay_epochs,
        warmup_epochs=FLAGS.lr_warmup_epochs)
    optimizer = tf.keras.optimizers.SGD(lr_schedule,
                                        momentum=1.0 - FLAGS.one_minus_momentum,
                                        nesterov=True)
    # Tuner used for optimizing lambda_parameters.
    tuner = tf.keras.optimizers.Adam(FLAGS.lr_tuning)

    metrics = {
        'train/negative_log_likelihood': tf.keras.metrics.Mean(),
        'train/accuracy': tf.keras.metrics.SparseCategoricalAccuracy(),
        'train/loss': tf.keras.metrics.Mean(),
        'train/ece': rm.metrics.ExpectedCalibrationError(
            num_bins=FLAGS.num_bins),
        'train/diversity': rm.metrics.AveragePairwiseDiversity(),
        'test/negative_log_likelihood': tf.keras.metrics.Mean(),
        'test/accuracy': tf.keras.metrics.SparseCategoricalAccuracy(),
        'test/ece': rm.metrics.ExpectedCalibrationError(
            num_bins=FLAGS.num_bins),
        'test/gibbs_nll': tf.keras.metrics.Mean(),
        'test/gibbs_accuracy': tf.keras.metrics.SparseCategoricalAccuracy(),
        'test/diversity': rm.metrics.AveragePairwiseDiversity(),
        'validation/loss': tf.keras.metrics.Mean(),
        'validation/loss_entropy': tf.keras.metrics.Mean(),
        'validation/loss_ce': tf.keras.metrics.Mean()
    }
    corrupt_metrics = {}

    for i in range(FLAGS.ensemble_size):
      metrics['test/nll_member_{}'.format(i)] = tf.keras.metrics.Mean()
      metrics['test/accuracy_member_{}'.format(i)] = (
          tf.keras.metrics.SparseCategoricalAccuracy())
    if FLAGS.corruptions_interval > 0:
      for intensity in range(1, 6):
        for corruption in corruption_types:
          dataset_name = '{0}_{1}'.format(corruption, intensity)
          corrupt_metrics['test/nll_{}'.format(dataset_name)] = (
              tf.keras.metrics.Mean())
          corrupt_metrics['test/accuracy_{}'.format(dataset_name)] = (
              tf.keras.metrics.SparseCategoricalAccuracy())
          corrupt_metrics['test/ece_{}'.format(dataset_name)] = (
              rm.metrics.ExpectedCalibrationError(num_bins=FLAGS.num_bins))

    checkpoint = tf.train.Checkpoint(
        model=model, lambda_parameters=lambda_parameters, optimizer=optimizer)
    latest_checkpoint = tf.train.latest_checkpoint(FLAGS.output_dir)
    initial_epoch = 0
    if latest_checkpoint and FLAGS.restore_checkpoint:
      # checkpoint.restore must be within a strategy.scope() so that optimizer
      # slot variables are mirrored.
      checkpoint.restore(latest_checkpoint)
      logging.info('Loaded checkpoint %s', latest_checkpoint)
      initial_epoch = optimizer.iterations.numpy() // steps_per_epoch

  @tf.function
  def train_step(iterator):
    """Training StepFn."""
    def step_fn(inputs):
      """Per-Replica StepFn."""
      images = inputs['features']
      labels = inputs['labels']
      images = tf.tile(images, [FLAGS.ensemble_size, 1, 1, 1])

      # Generate lambdas.
      lambdas = log_uniform_sample(per_core_batch_size, lambda_parameters)
      lambdas = tf.reshape(
          lambdas,
          (FLAGS.ensemble_size * per_core_batch_size, lambdas_config.dim))

      with tf.GradientTape() as tape:
        logits = model([images, lambdas], training=True)
        if FLAGS.use_gibbs_ce:
          # Average of single-model CEs.
          # Tiling of labels should only be done for the Gibbs CE loss.
          labels = tf.tile(labels, [FLAGS.ensemble_size])
          negative_log_likelihood = tf.reduce_mean(
              tf.keras.losses.sparse_categorical_crossentropy(
                  labels, logits, from_logits=True))
        else:
          # Ensemble CE uses no tiling of the labels.
          negative_log_likelihood = ensemble_crossentropy(
              labels, logits, FLAGS.ensemble_size)
        # Note: Divide l2_loss by sample_size (this differs from the
        # uncertainty_baselines implementation).
        l2_loss = sum(model.losses) / train_sample_size
        loss = negative_log_likelihood + l2_loss
        # Scale the loss given the TPUStrategy will reduce sum all gradients.
        scaled_loss = loss / strategy.num_replicas_in_sync

      grads = tape.gradient(scaled_loss, model.trainable_variables)

      # Separate learning rate for fast weights.
      grads_and_vars = []
      for grad, var in zip(grads, model.trainable_variables):
        if (('alpha' in var.name or 'gamma' in var.name) and
            'batch_norm' not in var.name):
          grads_and_vars.append((grad * FLAGS.fast_weight_lr_multiplier, var))
        else:
          grads_and_vars.append((grad, var))
      optimizer.apply_gradients(grads_and_vars)

      probs = tf.nn.softmax(logits)
      per_probs = tf.split(probs, num_or_size_splits=FLAGS.ensemble_size,
                           axis=0)
      per_probs_stacked = tf.stack(per_probs, axis=0)
      metrics['train/ece'].add_batch(probs, label=labels)
      metrics['train/loss'].update_state(loss)
      metrics['train/negative_log_likelihood'].update_state(
          negative_log_likelihood)
      metrics['train/accuracy'].update_state(labels, logits)
      metrics['train/diversity'].add_batch(per_probs_stacked)
      if grads_and_vars:
        # Note: this rebinding is a no-op; `grads` is not used again.
        grads, _ = zip(*grads_and_vars)

    strategy.run(step_fn, args=(next(iterator),))

  @tf.function
  def tuning_step(iterator):
    """Tuning StepFn."""
    def step_fn(inputs):
      """Per-Replica StepFn."""
      images = inputs['features']
      labels = inputs['labels']
      images = tf.tile(images, [FLAGS.ensemble_size, 1, 1, 1])

      with tf.GradientTape(watch_accessed_variables=False) as tape:
        tape.watch(lambda_parameters)

        # Sample lambdas.
        if FLAGS.sample_and_tune:
          lambdas = log_uniform_sample(per_core_batch_size, lambda_parameters)
        else:
          lambdas = log_uniform_mean(lambda_parameters)
          lambdas = tf.repeat(lambdas, per_core_batch_size, axis=0)
        lambdas = tf.reshape(
            lambdas,
            (FLAGS.ensemble_size * per_core_batch_size, lambdas_config.dim))

        # Ensemble CE.
        logits = model([images, lambdas], training=False)
        ce = ensemble_crossentropy(labels, logits, FLAGS.ensemble_size)
        # Entropy penalty for the lambda distribution.
        entropy = FLAGS.tau * log_uniform_entropy(lambda_parameters)
        loss = ce - entropy
        scaled_loss = loss / strategy.num_replicas_in_sync

      gradients = tape.gradient(loss, lambda_parameters)
      tuner.apply_gradients(zip(gradients, lambda_parameters))

      metrics['validation/loss_ce'].update_state(
          ce / strategy.num_replicas_in_sync)
      metrics['validation/loss_entropy'].update_state(
          entropy / strategy.num_replicas_in_sync)
      metrics['validation/loss'].update_state(scaled_loss)

    strategy.run(step_fn, args=(next(iterator),))

  @tf.function
  def test_step(iterator, dataset_name, num_eval_samples=0):
    """Evaluation StepFn."""
    n_samples = num_eval_samples if num_eval_samples >= 0 else -num_eval_samples
    if num_eval_samples >= 0:
      # The +1 accounts for the fact that we add the mean of lambdas.
      ensemble_size = FLAGS.ensemble_size * (1 + n_samples)
    else:
      ensemble_size = FLAGS.ensemble_size * n_samples

    def step_fn(inputs):
      """Per-Replica StepFn."""
      # Note that we don't use tf.tile for labels here.
      images = inputs['features']
      labels = inputs['labels']
      images = tf.tile(images, [ensemble_size, 1, 1, 1])

      # Get lambdas.
      samples = log_uniform_sample(n_samples, lambda_parameters)
      if num_eval_samples >= 0:
        lambdas = log_uniform_mean(lambda_parameters)
        lambdas = tf.expand_dims(lambdas, 1)
        lambdas = tf.concat((lambdas, samples), 1)
      else:
        lambdas = samples

      # Lambdas with shape (ens size, samples, dim of lambdas).
      rep_lambdas = tf.repeat(lambdas, per_core_batch_size, axis=1)
      rep_lambdas = tf.reshape(rep_lambdas,
                               (ensemble_size * per_core_batch_size, -1))

      # Eval on test sets.
      logits = model([images, rep_lambdas], training=False)
      probs = tf.nn.softmax(logits)
      per_probs = tf.split(probs, num_or_size_splits=ensemble_size, axis=0)

      # Per-member performance and Gibbs performance (average per-member
      # performance).
      if dataset_name == 'clean':
        for i in range(FLAGS.ensemble_size):
          # We record the first sample of lambdas per batch-ens member.
          first_member_index = i * (ensemble_size // FLAGS.ensemble_size)
          member_probs = per_probs[first_member_index]
          member_loss = tf.keras.losses.sparse_categorical_crossentropy(
              labels, member_probs)
          metrics['test/nll_member_{}'.format(i)].update_state(member_loss)
          metrics['test/accuracy_member_{}'.format(i)].update_state(
              labels, member_probs)

        labels_tile = tf.tile(labels, [ensemble_size])
        metrics['test/gibbs_nll'].update_state(
            tf.reduce_mean(
                tf.keras.losses.sparse_categorical_crossentropy(
                    labels_tile, logits, from_logits=True)))
        metrics['test/gibbs_accuracy'].update_state(labels_tile, probs)

      # Ensemble performance.
      negative_log_likelihood = ensemble_crossentropy(labels, logits,
                                                      ensemble_size)
      probs = tf.reduce_mean(per_probs, axis=0)
      if dataset_name == 'clean':
        metrics['test/negative_log_likelihood'].update_state(
            negative_log_likelihood)
        metrics['test/accuracy'].update_state(labels, probs)
        metrics['test/ece'].add_batch(probs, label=labels)
      else:
        corrupt_metrics['test/nll_{}'.format(dataset_name)].update_state(
            negative_log_likelihood)
        corrupt_metrics['test/accuracy_{}'.format(dataset_name)].update_state(
            labels, probs)
        corrupt_metrics['test/ece_{}'.format(dataset_name)].add_batch(
            probs, label=labels)

      if dataset_name == 'clean':
        per_probs_stacked = tf.stack(per_probs, axis=0)
        metrics['test/diversity'].add_batch(per_probs_stacked)

    strategy.run(step_fn, args=(next(iterator),))

  logging.info('--- Starting training using %d examples. ---',
               train_sample_size)
  train_iterator = iter(train_dataset)
  validation_iterator = iter(validation_dataset)
  start_time = time.time()
  for epoch in range(initial_epoch, FLAGS.train_epochs):
    logging.info('Starting to run epoch: %s', epoch)
    for step in range(steps_per_epoch):
      train_step(train_iterator)

      do_tuning = (epoch >= FLAGS.tuning_warmup_epochs)
      if do_tuning and ((step + 1) % FLAGS.tuning_every_x_step == 0):
        tuning_step(validation_iterator)
        # Clip lambda parameters if outside of range.
        clip_lambda_parameters(lambda_parameters, lambdas_config)

      current_step = epoch * steps_per_epoch + (step + 1)
      max_steps = steps_per_epoch * FLAGS.train_epochs
      time_elapsed = time.time() - start_time
      steps_per_sec = float(current_step) / time_elapsed
      eta_seconds = (max_steps - current_step) / steps_per_sec
      message = ('{:.1%} completion: epoch {:d}/{:d}. {:.1f} steps/s. '
                 'ETA: {:.0f} min. Time elapsed: {:.0f} min'.format(
                     current_step / max_steps, epoch + 1, FLAGS.train_epochs,
                     steps_per_sec, eta_seconds / 60, time_elapsed / 60))
      if step % 20 == 0:
        logging.info(message)

    # Evaluate on test data.
    datasets_to_evaluate = {'clean': test_datasets['clean']}
    if (FLAGS.corruptions_interval > 0 and
        (epoch + 1) % FLAGS.corruptions_interval == 0):
      datasets_to_evaluate = test_datasets
    for dataset_name, test_dataset in datasets_to_evaluate.items():
      test_iterator = iter(test_dataset)
      logging.info('Testing on dataset %s', dataset_name)
      for step in range(steps_per_eval):
        if step % 20 == 0:
          logging.info('Starting to run eval step %s of epoch: %s', step,
                       epoch)
        test_step(test_iterator, dataset_name, FLAGS.num_eval_samples)
      logging.info('Done with testing on %s', dataset_name)

    corrupt_results = {}
    if (FLAGS.corruptions_interval > 0 and
        (epoch + 1) % FLAGS.corruptions_interval == 0):
      corrupt_results = utils.aggregate_corrupt_metrics(corrupt_metrics,
                                                        corruption_types)

    logging.info('Train Loss: %.4f, Accuracy: %.2f%%',
                 metrics['train/loss'].result(),
                 metrics['train/accuracy'].result() * 100)
    logging.info('Validation Loss: %.4f, CE: %.4f, Entropy: %.4f',
                 metrics['validation/loss'].result(),
                 metrics['validation/loss_ce'].result(),
                 metrics['validation/loss_entropy'].result())
    logging.info('Test NLL: %.4f, Accuracy: %.2f%%',
                 metrics['test/negative_log_likelihood'].result(),
                 metrics['test/accuracy'].result() * 100)
    for i in range(FLAGS.ensemble_size):
      logging.info('Member %d Test Loss: %.4f, Accuracy: %.2f%%',
                   i, metrics['test/nll_member_{}'.format(i)].result(),
                   metrics['test/accuracy_member_{}'.format(i)].result() * 100)

    total_results = {name: metric.result() for name, metric in metrics.items()}
    total_results.update(
        {name: metric.result() for name, metric in corrupt_metrics.items()})
    total_results.update(corrupt_results)
    # Results from Robustness Metrics themselves return a dict, so flatten them.
    total_results = utils.flatten_dictionary(total_results)
    with summary_writer.as_default():
      for name, result in total_results.items():
        tf.summary.scalar(name, result, step=epoch + 1)

    for metric in metrics.values():
      metric.reset_states()

    # Save checkpoint and lambdas config.
    if (FLAGS.checkpoint_interval > 0 and
        (epoch + 1) % FLAGS.checkpoint_interval == 0):
      checkpoint_name = checkpoint.save(
          os.path.join(FLAGS.output_dir, 'checkpoint'))
      lambdas_cf = lambdas_config.get_config()
      filepath = os.path.join(FLAGS.output_dir, 'lambdas_config.p')
      with tf.io.gfile.GFile(filepath, 'wb') as fp:
        pickle.dump(lambdas_cf, fp, protocol=pickle.HIGHEST_PROTOCOL)
      logging.info('Saved checkpoint to %s', checkpoint_name)

  with summary_writer.as_default():
    hp.hparams({
        'base_learning_rate': FLAGS.base_learning_rate,
        'one_minus_momentum': FLAGS.one_minus_momentum,
        'l2': FLAGS.l2,
        'random_sign_init': FLAGS.random_sign_init,
        'fast_weight_lr_multiplier': FLAGS.fast_weight_lr_multiplier,
    })
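# `log_uniform_sample`, `log_uniform_mean` and `log_uniform_entropy` are
# defined elsewhere in the repo. A minimal sketch under the assumption that
# each lambda is log-uniform, i.e. log(lambda) ~ U[log_lower, log_upper] per
# ensemble member and dimension (the actual parameterization may differ):
import tensorflow as tf

def log_uniform_sample(num_samples, lambda_parameters):
  """Draws `num_samples` lambdas per member: shape [ens, samples, dim]."""
  log_lower, log_upper = lambda_parameters  # each of shape [ens_size, dim]
  u = tf.random.uniform([log_lower.shape[0], num_samples, log_lower.shape[1]])
  return tf.exp(log_lower[:, None, :] +
                u * (log_upper - log_lower)[:, None, :])

def log_uniform_mean(lambda_parameters):
  """E[lambda] = (upper - lower) / (log_upper - log_lower)."""
  log_lower, log_upper = lambda_parameters
  lower, upper = tf.exp(log_lower), tf.exp(log_upper)
  return (upper - lower) / tf.maximum(log_upper - log_lower, 1e-12)

def log_uniform_entropy(lambda_parameters):
  """Mean entropy of log(lambda), i.e. log(log_upper - log_lower)."""
  log_lower, log_upper = lambda_parameters
  return tf.reduce_mean(tf.math.log(tf.maximum(log_upper - log_lower, 1e-12)))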
def main(argv): del argv # unused arg tf.io.gfile.makedirs(FLAGS.output_dir) logging.info('Saving checkpoints at %s', FLAGS.output_dir) tf.random.set_seed(FLAGS.seed) data_dir = FLAGS.data_dir if FLAGS.use_gpu: logging.info('Use GPU') strategy = tf.distribute.MirroredStrategy() else: logging.info('Use TPU at %s', FLAGS.tpu if FLAGS.tpu is not None else 'local') resolver = tf.distribute.cluster_resolver.TPUClusterResolver( tpu=FLAGS.tpu) tf.config.experimental_connect_to_cluster(resolver) tf.tpu.experimental.initialize_tpu_system(resolver) strategy = tf.distribute.TPUStrategy(resolver) ds_info = tfds.builder(FLAGS.dataset).info train_batch_size = FLAGS.per_core_batch_size * FLAGS.num_cores // FLAGS.batch_repetitions test_batch_size = FLAGS.per_core_batch_size * FLAGS.num_cores train_dataset_size = ds_info.splits['train'].num_examples steps_per_epoch = train_dataset_size // train_batch_size steps_per_eval = ds_info.splits['test'].num_examples // test_batch_size num_classes = ds_info.features['label'].num_classes train_builder = ub.datasets.get(FLAGS.dataset, data_dir=data_dir, download_data=FLAGS.download_data, split=tfds.Split.TRAIN, validation_percent=1. - FLAGS.train_proportion) train_dataset = train_builder.load(batch_size=train_batch_size) validation_dataset = None steps_per_validation = 0 if FLAGS.train_proportion < 1.0: validation_builder = ub.datasets.get(FLAGS.dataset, data_dir=data_dir, download_data=FLAGS.download_data, split=tfds.Split.VALIDATION, validation_percent=1. - FLAGS.train_proportion) validation_dataset = validation_builder.load( batch_size=test_batch_size) validation_dataset = strategy.experimental_distribute_dataset( validation_dataset) steps_per_validation = validation_builder.num_examples // test_batch_size clean_test_builder = ub.datasets.get(FLAGS.dataset, data_dir=data_dir, download_data=FLAGS.download_data, split=tfds.Split.TEST) clean_test_dataset = clean_test_builder.load(batch_size=test_batch_size) train_dataset = strategy.experimental_distribute_dataset(train_dataset) test_datasets = { 'clean': strategy.experimental_distribute_dataset(clean_test_dataset), } steps_per_epoch = train_builder.num_examples // train_batch_size steps_per_eval = clean_test_builder.num_examples // test_batch_size num_classes = 100 if FLAGS.dataset == 'cifar100' else 10 if FLAGS.corruptions_interval > 0: if FLAGS.dataset == 'cifar100': data_dir = FLAGS.cifar100_c_path corruption_types, _ = utils.load_corrupted_test_info(FLAGS.dataset) for corruption_type in corruption_types: for severity in range(1, 6): dataset = ub.datasets.get( f'{FLAGS.dataset}_corrupted', corruption_type=corruption_type, data_dir=data_dir, severity=severity, split=tfds.Split.TEST).load(batch_size=test_batch_size) test_datasets[f'{corruption_type}_{severity}'] = ( strategy.experimental_distribute_dataset(dataset)) summary_writer = tf.summary.create_file_writer( os.path.join(FLAGS.output_dir, 'summaries')) with strategy.scope(): logging.info('Building Keras model') model = ub.models.wide_resnet_mimo( input_shape=[FLAGS.ensemble_size] + list(ds_info.features['image'].shape), depth=28, width_multiplier=FLAGS.width_multiplier, num_classes=num_classes, ensemble_size=FLAGS.ensemble_size) logging.info('Model input shape: %s', model.input_shape) logging.info('Model output shape: %s', model.output_shape) logging.info('Model number of weights: %s', model.count_params()) # Linearly scale learning rate and the decay epochs by vanilla settings. 
base_lr = FLAGS.base_learning_rate * train_batch_size / 128 lr_decay_epochs = [(int(start_epoch_str) * FLAGS.train_epochs) // 200 for start_epoch_str in FLAGS.lr_decay_epochs] lr_schedule = ub.schedules.WarmUpPiecewiseConstantSchedule( steps_per_epoch, base_lr, FLAGS.lr_decay_ratio, lr_decay_epochs, FLAGS.lr_warmup_epochs) optimizer = tf.keras.optimizers.SGD(lr_schedule, momentum=1.0 - FLAGS.one_minus_momentum, nesterov=True) metrics = { 'train/negative_log_likelihood': tf.keras.metrics.Mean(), 'train/accuracy': tf.keras.metrics.SparseCategoricalAccuracy(), 'train/loss': tf.keras.metrics.Mean(), 'train/ece': rm.metrics.ExpectedCalibrationError(num_bins=FLAGS.num_bins), 'test/negative_log_likelihood': tf.keras.metrics.Mean(), 'test/accuracy': tf.keras.metrics.SparseCategoricalAccuracy(), 'test/ece': rm.metrics.ExpectedCalibrationError(num_bins=FLAGS.num_bins), 'test/diversity': rm.metrics.AveragePairwiseDiversity(), } eval_dataset_splits = ['test'] if validation_dataset: metrics.update({ 'validation/negative_log_likelihood': tf.keras.metrics.Mean(), 'validation/accuracy': tf.keras.metrics.SparseCategoricalAccuracy(), 'validation/ece': rm.metrics.ExpectedCalibrationError(num_bins=FLAGS.num_bins), }) eval_dataset_splits += ['validation'] for i in range(FLAGS.ensemble_size): for dataset_split in eval_dataset_splits: metrics[ f'{dataset_split}/nll_member_{i}'] = tf.keras.metrics.Mean( ) metrics[f'{dataset_split}/accuracy_member_{i}'] = ( tf.keras.metrics.SparseCategoricalAccuracy()) if FLAGS.corruptions_interval > 0: corrupt_metrics = {} for intensity in range(1, 6): for corruption in corruption_types: dataset_name = '{0}_{1}'.format(corruption, intensity) corrupt_metrics['test/nll_{}'.format(dataset_name)] = ( tf.keras.metrics.Mean()) corrupt_metrics['test/accuracy_{}'.format( dataset_name)] = ( tf.keras.metrics.SparseCategoricalAccuracy()) corrupt_metrics['test/ece_{}'.format(dataset_name)] = ( rm.metrics.ExpectedCalibrationError( num_bins=FLAGS.num_bins)) for i in range(FLAGS.ensemble_size): metrics['test/nll_member_{}'.format(i)] = tf.keras.metrics.Mean() metrics['test/accuracy_member_{}'.format(i)] = ( tf.keras.metrics.SparseCategoricalAccuracy()) checkpoint = tf.train.Checkpoint(model=model, optimizer=optimizer) latest_checkpoint = tf.train.latest_checkpoint(FLAGS.output_dir) initial_epoch = 0 if latest_checkpoint: # checkpoint.restore must be within a strategy.scope() so that optimizer # slot variables are mirrored. checkpoint.restore(latest_checkpoint) logging.info('Loaded checkpoint %s', latest_checkpoint) initial_epoch = optimizer.iterations.numpy() // steps_per_epoch @tf.function def train_step(iterator): """Training StepFn.""" def step_fn(inputs): """Per-Replica StepFn.""" images = inputs['features'] labels = inputs['labels'] batch_size = tf.shape(images)[0] main_shuffle = tf.random.shuffle( tf.tile(tf.range(batch_size), [FLAGS.batch_repetitions])) to_shuffle = tf.cast( tf.cast(tf.shape(main_shuffle)[0], tf.float32) * (1. 
- FLAGS.input_repetition_probability), tf.int32) shuffle_indices = [ tf.concat([ tf.random.shuffle(main_shuffle[:to_shuffle]), main_shuffle[to_shuffle:] ], axis=0) for _ in range(FLAGS.ensemble_size) ] images = tf.stack([ tf.gather(images, indices, axis=0) for indices in shuffle_indices ], axis=1) labels = tf.stack([ tf.gather(labels, indices, axis=0) for indices in shuffle_indices ], axis=1) with tf.GradientTape() as tape: logits = model(images, training=True) negative_log_likelihood = tf.reduce_mean( tf.reduce_sum( tf.keras.losses.sparse_categorical_crossentropy( labels, logits, from_logits=True), axis=1)) filtered_variables = [] for var in model.trainable_variables: # Apply l2 on the BN parameters and bias terms. if ('kernel' in var.name or 'batch_norm' in var.name or 'bias' in var.name): filtered_variables.append(tf.reshape(var, (-1, ))) l2_loss = FLAGS.l2 * 2 * tf.nn.l2_loss( tf.concat(filtered_variables, axis=0)) # Scale the loss given the TPUStrategy will reduce sum all gradients. loss = negative_log_likelihood + l2_loss scaled_loss = loss / strategy.num_replicas_in_sync grads = tape.gradient(scaled_loss, model.trainable_variables) optimizer.apply_gradients(zip(grads, model.trainable_variables)) probs = tf.nn.softmax(tf.reshape(logits, [-1, num_classes])) flat_labels = tf.reshape(labels, [-1]) metrics['train/ece'].add_batch(probs, label=flat_labels) metrics['train/loss'].update_state(loss) metrics['train/negative_log_likelihood'].update_state( negative_log_likelihood) metrics['train/accuracy'].update_state(flat_labels, probs) for _ in tf.range(tf.cast(steps_per_epoch, tf.int32)): strategy.run(step_fn, args=(next(iterator), )) @tf.function def test_step(iterator, dataset_split, dataset_name, num_steps): """Evaluation StepFn.""" def step_fn(inputs): """Per-Replica StepFn.""" images = inputs['features'] labels = inputs['labels'] images = tf.tile(tf.expand_dims(images, 1), [1, FLAGS.ensemble_size, 1, 1, 1]) logits = model(images, training=False) probs = tf.nn.softmax(logits) if dataset_name == 'clean': per_probs = tf.transpose(probs, perm=[1, 0, 2]) metrics['test/diversity'].add_batch(per_probs) for i in range(FLAGS.ensemble_size): member_probs = probs[:, i] member_loss = tf.keras.losses.sparse_categorical_crossentropy( labels, member_probs) metrics[f'{dataset_split}/nll_member_{i}'].update_state( member_loss) metrics[f'{dataset_split}/accuracy_member_{i}'].update_state( labels, member_probs) # Negative log marginal likelihood computed in a numerically-stable way. 
labels_tiled = tf.tile(tf.expand_dims(labels, 1), [1, FLAGS.ensemble_size]) log_likelihoods = -tf.keras.losses.sparse_categorical_crossentropy( labels_tiled, logits, from_logits=True) negative_log_likelihood = tf.reduce_mean( -tf.reduce_logsumexp(log_likelihoods, axis=[1]) + tf.math.log(float(FLAGS.ensemble_size))) probs = tf.math.reduce_mean(probs, axis=1) # marginalize if dataset_name == 'clean': metrics[ f'{dataset_split}/negative_log_likelihood'].update_state( negative_log_likelihood) metrics[f'{dataset_split}/accuracy'].update_state( labels, probs) metrics[f'{dataset_split}/ece'].add_batch(probs, label=labels) else: corrupt_metrics['test/nll_{}'.format( dataset_name)].update_state(negative_log_likelihood) corrupt_metrics['test/accuracy_{}'.format( dataset_name)].update_state(labels, probs) corrupt_metrics['test/ece_{}'.format(dataset_name)].add_batch( probs, label=labels) for _ in tf.range(tf.cast(num_steps, tf.int32)): strategy.run(step_fn, args=(next(iterator), )) metrics.update({'test/ms_per_example': tf.keras.metrics.Mean()}) train_iterator = iter(train_dataset) start_time = time.time() for epoch in range(initial_epoch, FLAGS.train_epochs): logging.info('Starting to run epoch: %s', epoch) train_step(train_iterator) current_step = (epoch + 1) * steps_per_epoch max_steps = steps_per_epoch * (FLAGS.train_epochs) time_elapsed = time.time() - start_time steps_per_sec = float(current_step) / time_elapsed eta_seconds = (max_steps - current_step) / steps_per_sec message = ('{:.1%} completion: epoch {:d}/{:d}. {:.1f} steps/s. ' 'ETA: {:.0f} min. Time elapsed: {:.0f} min'.format( current_step / max_steps, epoch + 1, FLAGS.train_epochs, steps_per_sec, eta_seconds / 60, time_elapsed / 60)) logging.info(message) if validation_dataset: validation_iterator = iter(validation_dataset) test_step(validation_iterator, 'validation', 'clean', steps_per_validation) datasets_to_evaluate = {'clean': test_datasets['clean']} if (FLAGS.corruptions_interval > 0 and (epoch + 1) % FLAGS.corruptions_interval == 0): datasets_to_evaluate = test_datasets for dataset_name, test_dataset in datasets_to_evaluate.items(): test_iterator = iter(test_dataset) logging.info('Testing on dataset %s', dataset_name) logging.info('Starting to run eval at epoch: %s', epoch) test_start_time = time.time() test_step(test_iterator, 'test', dataset_name, steps_per_eval) ms_per_example = (time.time() - test_start_time) * 1e6 / test_batch_size metrics['test/ms_per_example'].update_state(ms_per_example) logging.info('Done with testing on %s', dataset_name) corrupt_results = {} if (FLAGS.corruptions_interval > 0 and (epoch + 1) % FLAGS.corruptions_interval == 0): corrupt_results = utils.aggregate_corrupt_metrics( corrupt_metrics, corruption_types) logging.info('Train Loss: %.4f, Accuracy: %.2f%%', metrics['train/loss'].result(), metrics['train/accuracy'].result() * 100) logging.info('Test NLL: %.4f, Accuracy: %.2f%%', metrics['test/negative_log_likelihood'].result(), metrics['test/accuracy'].result() * 100) for i in range(FLAGS.ensemble_size): logging.info( 'Member %d Test Loss: %.4f, Accuracy: %.2f%%', i, metrics['test/nll_member_{}'.format(i)].result(), metrics['test/accuracy_member_{}'.format(i)].result() * 100) total_results = { name: metric.result() for name, metric in metrics.items() } total_results.update(corrupt_results) # Results from Robustness Metrics themselves return a dict, so flatten them. 
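# For example (illustrative keys and values only), a nested entry such as
# {'test/diversity': {'disagreement': 0.12, 'average_kl': 0.05}} becomes
# {'test/diversity/disagreement': 0.12, 'test/diversity/average_kl': 0.05}.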
total_results = utils.flatten_dictionary(total_results) with summary_writer.as_default(): for name, result in total_results.items(): tf.summary.scalar(name, result, step=epoch + 1) for metric in metrics.values(): metric.reset_states() if (FLAGS.checkpoint_interval > 0 and (epoch + 1) % FLAGS.checkpoint_interval == 0): checkpoint_name = checkpoint.save( os.path.join(FLAGS.output_dir, 'checkpoint')) logging.info('Saved checkpoint to %s', checkpoint_name) final_checkpoint_name = checkpoint.save( os.path.join(FLAGS.output_dir, 'checkpoint')) logging.info('Saved last checkpoint to %s', final_checkpoint_name) with summary_writer.as_default(): hp.hparams({ 'base_learning_rate': FLAGS.base_learning_rate, 'one_minus_momentum': FLAGS.one_minus_momentum, 'l2': FLAGS.l2, 'batch_repetitions': FLAGS.batch_repetitions, })
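# A minimal, self-contained sketch (hypothetical sizes, not part of the
# training script above) of the MIMO batch construction used in train_step:
# the batch is repeated `batch_repetitions` times, a fraction
# `input_repetition_probability` of the indices stays shared across ensemble
# members, and the remainder is re-shuffled independently per member.
import tensorflow as tf

def demo_mimo_indices(batch_size=4, ensemble_size=3, batch_repetitions=2,
                      input_repetition_probability=0.5):
  main_shuffle = tf.random.shuffle(
      tf.tile(tf.range(batch_size), [batch_repetitions]))
  to_shuffle = tf.cast(
      tf.cast(tf.shape(main_shuffle)[0], tf.float32) *
      (1. - input_repetition_probability), tf.int32)
  # The head of `main_shuffle` is re-shuffled per member; the tail is shared.
  shuffle_indices = [
      tf.concat([tf.random.shuffle(main_shuffle[:to_shuffle]),
                 main_shuffle[to_shuffle:]], axis=0)
      for _ in range(ensemble_size)
  ]
  # Column e holds the example indices routed to ensemble member e.
  return tf.stack(shuffle_indices, axis=1)  # [batch * repetitions, ensemble]

print(demo_mimo_indices())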
def main(argv): del argv # unused arg tf.io.gfile.makedirs(FLAGS.output_dir) logging.info('Saving checkpoints at %s', FLAGS.output_dir) tf.random.set_seed(FLAGS.seed) per_core_batch_size = FLAGS.per_core_batch_size // FLAGS.ensemble_size batch_size = per_core_batch_size * FLAGS.num_cores steps_per_epoch = APPROX_IMAGENET_TRAIN_IMAGES // batch_size steps_per_eval = IMAGENET_VALIDATION_IMAGES // batch_size if FLAGS.use_gpu: logging.info('Use GPU') strategy = tf.distribute.MirroredStrategy() else: logging.info('Use TPU at %s', FLAGS.tpu if FLAGS.tpu is not None else 'local') resolver = tf.distribute.cluster_resolver.TPUClusterResolver( tpu=FLAGS.tpu) tf.config.experimental_connect_to_cluster(resolver) tf.tpu.experimental.initialize_tpu_system(resolver) strategy = tf.distribute.TPUStrategy(resolver) mixup_params = { 'ensemble_size': FLAGS.ensemble_size, 'mixup_alpha': FLAGS.mixup_alpha, 'adaptive_mixup': FLAGS.adaptive_mixup, 'num_classes': NUM_CLASSES, } train_builder = ub.datasets.ImageNetDataset( split=tfds.Split.TRAIN, one_hot=(FLAGS.mixup_alpha > 0), use_bfloat16=FLAGS.use_bfloat16, mixup_params=mixup_params, ensemble_size=FLAGS.ensemble_size) train_dataset = train_builder.load(batch_size=batch_size, strategy=strategy) test_builder = ub.datasets.ImageNetDataset(split=tfds.Split.TEST, use_bfloat16=FLAGS.use_bfloat16) clean_test_dataset = test_builder.load(batch_size=batch_size, strategy=strategy) test_datasets = { 'clean': clean_test_dataset, } if FLAGS.adaptive_mixup: validation_builder = ub.datasets.ImageNetDataset( split=tfds.Split.VALIDATION, run_mixup=True, use_bfloat16=FLAGS.use_bfloat16) imagenet_confidence_dataset = validation_builder.load( batch_size=FLAGS.per_core_batch_size * FLAGS.num_cores, strategy=strategy) if FLAGS.corruptions_interval > 0: corruption_types, max_intensity = utils.load_corrupted_test_info() for name in corruption_types: for intensity in range(1, max_intensity + 1): dataset_name = '{0}_{1}'.format(name, intensity) dataset = utils.load_corrupted_test_dataset( batch_size=batch_size, corruption_name=name, corruption_intensity=intensity, use_bfloat16=FLAGS.use_bfloat16) test_datasets[dataset_name] = ( strategy.experimental_distribute_dataset(dataset)) if FLAGS.use_bfloat16: policy = tf.keras.mixed_precision.experimental.Policy('mixed_bfloat16') tf.keras.mixed_precision.experimental.set_policy(policy) summary_writer = tf.summary.create_file_writer( os.path.join(FLAGS.output_dir, 'summaries')) with strategy.scope(): logging.info('Building Keras ResNet-50 model') model = ub.models.resnet_batchensemble( input_shape=(224, 224, 3), num_classes=NUM_CLASSES, ensemble_size=FLAGS.ensemble_size, random_sign_init=FLAGS.random_sign_init, use_ensemble_bn=FLAGS.use_ensemble_bn, depth=FLAGS.depth) logging.info('Model input shape: %s', model.input_shape) logging.info('Model output shape: %s', model.output_shape) logging.info('Model number of weights: %s', model.count_params()) # Scale learning rate and decay epochs by vanilla settings. 
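# Linear scaling rule: the reference recipe is a global batch of 256 decayed
# at epochs 30/60/80 of a 90-epoch run, so the base learning rate below grows
# in proportion to batch_size and the decay points stretch with
# FLAGS.train_epochs.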
base_lr = FLAGS.base_learning_rate * batch_size / 256 decay_epochs = [ (FLAGS.train_epochs * 30) // 90, (FLAGS.train_epochs * 60) // 90, (FLAGS.train_epochs * 80) // 90, ] learning_rate = ub.schedules.WarmUpPiecewiseConstantSchedule( steps_per_epoch=steps_per_epoch, base_learning_rate=base_lr, decay_ratio=0.1, decay_epochs=decay_epochs, warmup_epochs=5) optimizer = tf.keras.optimizers.SGD(learning_rate=learning_rate, momentum=1.0 - FLAGS.one_minus_momentum, nesterov=True) metrics = { 'train/negative_log_likelihood': tf.keras.metrics.Mean(), 'train/accuracy': tf.keras.metrics.SparseCategoricalAccuracy(), 'train/loss': tf.keras.metrics.Mean(), 'train/ece': rm.metrics.ExpectedCalibrationError(num_bins=FLAGS.num_bins), 'train/diversity': rm.metrics.AveragePairwiseDiversity(), 'test/negative_log_likelihood': tf.keras.metrics.Mean(), 'test/accuracy': tf.keras.metrics.SparseCategoricalAccuracy(), 'test/ece': rm.metrics.ExpectedCalibrationError(num_bins=FLAGS.num_bins), 'test/diversity': rm.metrics.AveragePairwiseDiversity(), 'test/member_accuracy_mean': (tf.keras.metrics.SparseCategoricalAccuracy()), 'test/member_ece_mean': rm.metrics.ExpectedCalibrationError(num_bins=FLAGS.num_bins) } if FLAGS.corruptions_interval > 0: corrupt_metrics = {} for intensity in range(1, max_intensity + 1): for corruption in corruption_types: dataset_name = '{0}_{1}'.format(corruption, intensity) corrupt_metrics['test/nll_{}'.format(dataset_name)] = ( tf.keras.metrics.Mean()) corrupt_metrics['test/accuracy_{}'.format( dataset_name)] = ( tf.keras.metrics.SparseCategoricalAccuracy()) corrupt_metrics['test/ece_{}'.format(dataset_name)] = ( rm.metrics.ExpectedCalibrationError( num_bins=FLAGS.num_bins)) corrupt_metrics['test/member_acc_mean_{}'.format( dataset_name)] = ( tf.keras.metrics.SparseCategoricalAccuracy()) corrupt_metrics['test/member_ece_mean_{}'.format( dataset_name)] = (rm.metrics.ExpectedCalibrationError( num_bins=FLAGS.num_bins)) for i in range(FLAGS.ensemble_size): metrics['test/nll_member_{}'.format(i)] = tf.keras.metrics.Mean() metrics['test/accuracy_member_{}'.format(i)] = ( tf.keras.metrics.SparseCategoricalAccuracy()) logging.info('Finished building Keras ResNet-50 model') checkpoint = tf.train.Checkpoint(model=model, optimizer=optimizer) latest_checkpoint = tf.train.latest_checkpoint(FLAGS.output_dir) initial_epoch = 0 if latest_checkpoint: # checkpoint.restore must be within a strategy.scope() so that optimizer # slot variables are mirrored. 
checkpoint.restore(latest_checkpoint) logging.info('Loaded checkpoint %s', latest_checkpoint) initial_epoch = optimizer.iterations.numpy() // steps_per_epoch @tf.function def train_step(iterator): """Training StepFn.""" def step_fn(inputs): """Per-Replica StepFn.""" images = inputs['features'] labels = inputs['labels'] if FLAGS.adaptive_mixup: images = tf.identity(images) else: images = tf.tile(images, [FLAGS.ensemble_size, 1, 1, 1]) if FLAGS.adaptive_mixup: labels = tf.identity(labels) elif FLAGS.mixup_alpha > 0: labels = tf.tile(labels, [FLAGS.ensemble_size, 1]) else: labels = tf.tile(labels, [FLAGS.ensemble_size]) with tf.GradientTape() as tape: logits = model(images, training=True) if FLAGS.use_bfloat16: logits = tf.cast(logits, tf.float32) probs = tf.nn.softmax(logits) per_probs = tf.reshape( probs, tf.concat([[FLAGS.ensemble_size, -1], probs.shape[1:]], 0)) metrics['train/diversity'].add_batch(per_probs) if FLAGS.mixup_alpha > 0: negative_log_likelihood = tf.reduce_mean( tf.keras.losses.categorical_crossentropy( labels, logits, from_logits=True)) else: negative_log_likelihood = tf.reduce_mean( tf.keras.losses.sparse_categorical_crossentropy( labels, logits, from_logits=True)) filtered_variables = [] for var in model.trainable_variables: # Apply l2 on the slow weights and bias terms. This excludes BN # parameters and fast weight approximate posterior/prior parameters, # but pay caution to their naming scheme. if 'kernel' in var.name or 'bias' in var.name: filtered_variables.append(tf.reshape(var, (-1, ))) l2_loss = FLAGS.l2 * 2 * tf.nn.l2_loss( tf.concat(filtered_variables, axis=0)) loss = negative_log_likelihood + l2_loss # Scale the loss given the TPUStrategy will reduce sum all gradients. scaled_loss = loss / strategy.num_replicas_in_sync grads = tape.gradient(scaled_loss, model.trainable_variables) # Separate learning rate implementation. if FLAGS.fast_weight_lr_multiplier != 1.0: grads_and_vars = [] for grad, var in zip(grads, model.trainable_variables): # Apply different learning rate on the fast weights. This excludes BN # and slow weights, but pay caution to the naming scheme. 
if ('batch_norm' not in var.name and 'kernel' not in var.name): grads_and_vars.append( (grad * FLAGS.fast_weight_lr_multiplier, var)) else: grads_and_vars.append((grad, var)) optimizer.apply_gradients(grads_and_vars) else: optimizer.apply_gradients(zip(grads, model.trainable_variables)) if FLAGS.mixup_alpha > 0: labels = tf.argmax(labels, axis=-1) metrics['train/ece'].add_batch(probs, label=labels) metrics['train/loss'].update_state(loss) metrics['train/negative_log_likelihood'].update_state( negative_log_likelihood) metrics['train/accuracy'].update_state(labels, logits) for _ in tf.range(tf.cast(steps_per_epoch, tf.int32)): strategy.run(step_fn, args=(next(iterator), ))
@tf.function def test_step(iterator, dataset_name): """Evaluation StepFn.""" def step_fn(inputs): """Per-Replica StepFn.""" images = inputs['features'] labels = inputs['labels'] images = tf.tile(images, [FLAGS.ensemble_size, 1, 1, 1]) logits = model(images, training=False) if FLAGS.use_bfloat16: logits = tf.cast(logits, tf.float32) probs = tf.nn.softmax(logits) if dataset_name == 'clean': per_probs_tensor = tf.reshape( probs, tf.concat([[FLAGS.ensemble_size, -1], probs.shape[1:]], 0)) metrics['test/diversity'].add_batch(per_probs_tensor) per_probs = tf.split(probs, num_or_size_splits=FLAGS.ensemble_size, axis=0) probs = tf.reduce_mean(per_probs, axis=0) negative_log_likelihood = tf.reduce_mean( tf.keras.losses.sparse_categorical_crossentropy(labels, probs)) for i in range(FLAGS.ensemble_size): member_probs = per_probs[i] if dataset_name == 'clean': member_loss = tf.keras.losses.sparse_categorical_crossentropy( labels, member_probs) metrics['test/nll_member_{}'.format(i)].update_state( member_loss) metrics['test/accuracy_member_{}'.format(i)].update_state( labels, member_probs) metrics['test/member_accuracy_mean'].update_state( labels, member_probs) metrics['test/member_ece_mean'].add_batch(member_probs, label=labels) elif dataset_name != 'confidence_validation': corrupt_metrics['test/member_acc_mean_{}'.format( dataset_name)].update_state(labels, member_probs) corrupt_metrics['test/member_ece_mean_{}'.format( dataset_name)].add_batch(member_probs, label=labels) if dataset_name == 'clean': metrics['test/negative_log_likelihood'].update_state( negative_log_likelihood) metrics['test/accuracy'].update_state(labels, probs) metrics['test/ece'].add_batch(probs, label=labels) elif dataset_name != 'confidence_validation': corrupt_metrics['test/nll_{}'.format( dataset_name)].update_state(negative_log_likelihood) corrupt_metrics['test/accuracy_{}'.format( dataset_name)].update_state(labels, probs) corrupt_metrics['test/ece_{}'.format(dataset_name)].add_batch( probs, label=labels) if dataset_name == 'confidence_validation': return tf.stack(per_probs, 0), labels return strategy.run(step_fn, args=(next(iterator), ))
metrics.update({'test/ms_per_example': tf.keras.metrics.Mean()}) train_iterator = iter(train_dataset) start_time = time.time() for epoch in range(initial_epoch, FLAGS.train_epochs): logging.info('Starting to run epoch: %s', epoch) train_step(train_iterator) current_step = (epoch + 1) * steps_per_epoch max_steps = steps_per_epoch * FLAGS.train_epochs time_elapsed = time.time() - start_time steps_per_sec = float(current_step) / time_elapsed eta_seconds = (max_steps - current_step) / steps_per_sec message = ('{:.1%} completion: epoch {:d}/{:d}. {:.1f} steps/s. ' 'ETA: {:.0f} min. Time elapsed: {:.0f} min'.format( current_step / max_steps, epoch + 1, FLAGS.train_epochs, steps_per_sec, eta_seconds / 60, time_elapsed / 60)) logging.info(message)
if FLAGS.adaptive_mixup: confidence_set_iterator = iter(imagenet_confidence_dataset) predictions_list = [] labels_list = [] for step in range(FLAGS.confidence_eval_iterations): temp_predictions, temp_labels = test_step( confidence_set_iterator, 'confidence_validation') predictions_list.append(temp_predictions) labels_list.append(temp_labels) predictions = [ tf.concat(list(predictions_list[i].values), axis=1) for i in range(len(predictions_list)) ] labels = [ tf.concat(list(labels_list[i].values), axis=0) for i in range(len(labels_list)) ] predictions = tf.concat(predictions, axis=1) labels = tf.cast(tf.concat(labels, axis=0), tf.int64)
def compute_acc_conf(preds, label, focus_class): class_preds = tf.boolean_mask(preds, label == focus_class, axis=1) class_pred_labels = tf.argmax(class_preds, axis=-1) confidence = tf.reduce_mean( tf.reduce_max(class_preds, axis=-1), -1) accuracy = tf.reduce_mean(tf.cast( class_pred_labels == focus_class, tf.float32), axis=-1) return accuracy - confidence
calibration_per_class = [ compute_acc_conf(predictions, labels, i) for i in range(NUM_CLASSES) ] calibration_per_class = tf.stack(calibration_per_class, axis=1) logging.info('calibration per class') logging.info(calibration_per_class) mixup_coeff = tf.where(calibration_per_class > 0, 1.0, FLAGS.mixup_alpha) mixup_coeff = tf.clip_by_value(mixup_coeff, 0, 1) logging.info('mixup coeff') logging.info(mixup_coeff) mixup_params['mixup_coeff'] = mixup_coeff train_builder = ub.datasets.ImageNetDataset( split=tfds.Split.TRAIN, one_hot=(FLAGS.mixup_alpha > 0), use_bfloat16=FLAGS.use_bfloat16, mixup_params=mixup_params) train_dataset = train_builder.load(batch_size=batch_size, strategy=strategy) train_iterator = iter(train_dataset)
datasets_to_evaluate = {'clean': test_datasets['clean']} if (FLAGS.corruptions_interval > 0 and (epoch + 1) % FLAGS.corruptions_interval == 0): datasets_to_evaluate = test_datasets for dataset_name, test_dataset in datasets_to_evaluate.items(): test_iterator = iter(test_dataset) logging.info('Testing on dataset %s', dataset_name) for step in range(steps_per_eval): if step % 20 == 0: logging.info('Starting to run eval step %s of epoch: %s', step, epoch) test_start_time = time.time() test_step(test_iterator, dataset_name) ms_per_example = (time.time() - test_start_time) * 1e6 / batch_size metrics['test/ms_per_example'].update_state(ms_per_example) logging.info('Done with testing on %s', dataset_name)
corrupt_results = {} if (FLAGS.corruptions_interval > 0 and (epoch + 1) % FLAGS.corruptions_interval == 0): corrupt_results = utils.aggregate_corrupt_metrics( corrupt_metrics, corruption_types, max_intensity, FLAGS.alexnet_errors_path) logging.info('Train Loss: %.4f, Accuracy: %.2f%%', metrics['train/loss'].result(), metrics['train/accuracy'].result() * 100) logging.info('Test NLL: %.4f, Accuracy: %.2f%%', metrics['test/negative_log_likelihood'].result(), metrics['test/accuracy'].result() * 100) for i in range(FLAGS.ensemble_size): logging.info( 'Member %d Test Loss: %.4f, Accuracy: %.2f%%', i, metrics['test/nll_member_{}'.format(i)].result(), metrics['test/accuracy_member_{}'.format(i)].result() * 100) total_results = { name: metric.result() for name, metric in metrics.items() } total_results.update(corrupt_results) # Results from Robustness Metrics themselves return a dict, so flatten them.
total_results = utils.flatten_dictionary(total_results) with summary_writer.as_default(): for name, result in total_results.items(): tf.summary.scalar(name, result, step=epoch + 1) for _, metric in metrics.items(): metric.reset_states() if (FLAGS.checkpoint_interval > 0 and (epoch + 1) % FLAGS.checkpoint_interval == 0): checkpoint_name = checkpoint.save( os.path.join(FLAGS.output_dir, 'checkpoint')) logging.info('Saved checkpoint to %s', checkpoint_name) final_save_name = os.path.join(FLAGS.output_dir, 'model') model.save(final_save_name) logging.info('Saved model to %s', final_save_name) with summary_writer.as_default(): hp.hparams({ 'base_learning_rate': FLAGS.base_learning_rate, 'one_minus_momentum': FLAGS.one_minus_momentum, 'l2': FLAGS.l2, 'random_sign_init': FLAGS.random_sign_init, 'fast_weight_lr_multiplier': FLAGS.fast_weight_lr_multiplier, })
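# A small illustration (made-up shapes and values) of the per-class rule that
# drives adaptive mixup above: classes whose ensemble accuracy exceeds their
# mean confidence get a mixup coefficient of 1.0, all others keep mixup_alpha,
# clipped to [0, 1]. `demo_adaptive_mixup_coeff` is a hypothetical helper that
# works on already-averaged ensemble probabilities; it is not part of the
# script.
import tensorflow as tf

def demo_adaptive_mixup_coeff(predictions, labels, num_classes,
                              mixup_alpha=0.2):
  """predictions: [num_examples, num_classes] averaged ensemble probabilities."""
  def acc_minus_conf(focus_class):
    class_preds = tf.boolean_mask(predictions, labels == focus_class, axis=0)
    accuracy = tf.reduce_mean(tf.cast(
        tf.argmax(class_preds, axis=-1) == focus_class, tf.float32))
    confidence = tf.reduce_mean(tf.reduce_max(class_preds, axis=-1))
    return accuracy - confidence
  calibration = tf.stack([acc_minus_conf(c) for c in range(num_classes)])
  return tf.clip_by_value(
      tf.where(calibration > 0, 1.0, mixup_alpha), 0., 1.)

probs = tf.nn.softmax(tf.random.normal([32, 10]))
labels = tf.cast(tf.math.floormod(tf.range(32), 10), tf.int64)
print(demo_adaptive_mixup_coeff(probs, labels, num_classes=10))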
def main(argv): del argv # unused arg tf.random.set_seed(FLAGS.seed) per_core_batch_size = FLAGS.per_core_batch_size // FLAGS.ensemble_size batch_size = per_core_batch_size * FLAGS.num_cores steps_per_epoch = APPROX_IMAGENET_TRAIN_IMAGES // batch_size steps_per_eval = IMAGENET_VALIDATION_IMAGES // batch_size logging.info('Saving checkpoints at %s', FLAGS.output_dir) data_dir = FLAGS.data_dir if FLAGS.use_gpu: logging.info('Use GPU') strategy = tf.distribute.MirroredStrategy() else: logging.info('Use TPU at %s', FLAGS.tpu if FLAGS.tpu is not None else 'local') resolver = tf.distribute.cluster_resolver.TPUClusterResolver( tpu=FLAGS.tpu) tf.config.experimental_connect_to_cluster(resolver) tf.tpu.experimental.initialize_tpu_system(resolver) strategy = tf.distribute.TPUStrategy(resolver) train_builder = ub.datasets.ImageNetDataset( split=tfds.Split.TRAIN, use_bfloat16=FLAGS.use_bfloat16, data_dir=data_dir) train_dataset = train_builder.load(batch_size=batch_size, strategy=strategy) test_builder = ub.datasets.ImageNetDataset(split=tfds.Split.TEST, use_bfloat16=FLAGS.use_bfloat16, data_dir=data_dir) clean_test_dataset = test_builder.load(batch_size=batch_size, strategy=strategy) test_datasets = {'clean': clean_test_dataset} if FLAGS.corruptions_interval > 0: corruption_types, max_intensity = utils.load_corrupted_test_info() for name in corruption_types: for intensity in range(1, max_intensity + 1): dataset_name = '{0}_{1}'.format(name, intensity) dataset = utils.load_corrupted_test_dataset( batch_size=batch_size, corruption_name=name, corruption_intensity=intensity, use_bfloat16=FLAGS.use_bfloat16) test_datasets[dataset_name] = ( strategy.experimental_distribute_dataset(dataset)) if FLAGS.use_bfloat16: tf.keras.mixed_precision.set_global_policy('mixed_bfloat16') summary_writer = tf.summary.create_file_writer( os.path.join(FLAGS.output_dir, 'summaries')) with strategy.scope(): logging.info('Building Keras ResNet-50 model') model = ub.models.resnet50_het_rank1( input_shape=(224, 224, 3), num_classes=NUM_CLASSES, alpha_initializer=FLAGS.alpha_initializer, gamma_initializer=FLAGS.gamma_initializer, alpha_regularizer=FLAGS.alpha_regularizer, gamma_regularizer=FLAGS.gamma_regularizer, use_additive_perturbation=FLAGS.use_additive_perturbation, ensemble_size=FLAGS.ensemble_size, random_sign_init=FLAGS.random_sign_init, dropout_rate=FLAGS.dropout_rate, prior_stddev=FLAGS.prior_stddev, use_tpu=not FLAGS.use_gpu, use_ensemble_bn=FLAGS.use_ensemble_bn, num_factors=FLAGS.num_factors, temperature=FLAGS.temperature, num_mc_samples=FLAGS.num_mc_samples) logging.info('Model input shape: %s', model.input_shape) logging.info('Model output shape: %s', model.output_shape) logging.info('Model number of weights: %s', model.count_params()) # Scale learning rate and decay epochs by vanilla settings. 
base_lr = FLAGS.base_learning_rate * batch_size / 256 decay_epochs = [ (FLAGS.train_epochs * 30) // 90, (FLAGS.train_epochs * 60) // 90, (FLAGS.train_epochs * 80) // 90, ] learning_rate = ub.schedules.WarmUpPiecewiseConstantSchedule( steps_per_epoch=steps_per_epoch, base_learning_rate=base_lr, decay_ratio=0.1, decay_epochs=decay_epochs, warmup_epochs=5) optimizer = tf.keras.optimizers.SGD(learning_rate=learning_rate, momentum=1.0 - FLAGS.one_minus_momentum, nesterov=True) metrics = { 'train/negative_log_likelihood': tf.keras.metrics.Mean(), 'train/kl': tf.keras.metrics.Mean(), 'train/kl_scale': tf.keras.metrics.Mean(), 'train/elbo': tf.keras.metrics.Mean(), 'train/loss': tf.keras.metrics.Mean(), 'train/accuracy': tf.keras.metrics.SparseCategoricalAccuracy(), 'train/ece': rm.metrics.ExpectedCalibrationError(num_bins=FLAGS.num_bins), 'train/diversity': rm.metrics.AveragePairwiseDiversity(), 'test/negative_log_likelihood': tf.keras.metrics.Mean(), 'test/kl': tf.keras.metrics.Mean(), 'test/elbo': tf.keras.metrics.Mean(), 'test/accuracy': tf.keras.metrics.SparseCategoricalAccuracy(), 'test/ece': rm.metrics.ExpectedCalibrationError(num_bins=FLAGS.num_bins), 'test/diversity': rm.metrics.AveragePairwiseDiversity(), 'test/member_accuracy_mean': (tf.keras.metrics.SparseCategoricalAccuracy()), 'test/member_ece_mean': rm.metrics.ExpectedCalibrationError(num_bins=FLAGS.num_bins), } if FLAGS.corruptions_interval > 0: corrupt_metrics = {} for intensity in range(1, max_intensity + 1): for corruption in corruption_types: dataset_name = '{0}_{1}'.format(corruption, intensity) corrupt_metrics['test/nll_{}'.format(dataset_name)] = ( tf.keras.metrics.Mean()) corrupt_metrics['test/kl_{}'.format(dataset_name)] = ( tf.keras.metrics.Mean()) corrupt_metrics['test/elbo_{}'.format(dataset_name)] = ( tf.keras.metrics.Mean()) corrupt_metrics['test/accuracy_{}'.format( dataset_name)] = ( tf.keras.metrics.SparseCategoricalAccuracy()) corrupt_metrics['test/ece_{}'.format(dataset_name)] = ( rm.metrics.ExpectedCalibrationError( num_bins=FLAGS.num_bins)) if FLAGS.ensemble_size > 1: for i in range(FLAGS.ensemble_size): metrics['test/nll_member_{}'.format( i)] = tf.keras.metrics.Mean() metrics['test/accuracy_member_{}'.format(i)] = ( tf.keras.metrics.SparseCategoricalAccuracy()) logging.info('Finished building Keras ResNet-50 model') checkpoint = tf.train.Checkpoint(model=model, optimizer=optimizer) latest_checkpoint = tf.train.latest_checkpoint(FLAGS.output_dir) initial_epoch = 0 if latest_checkpoint: # checkpoint.restore must be within a strategy.scope() so that optimizer # slot variables are mirrored. checkpoint.restore(latest_checkpoint) logging.info('Loaded checkpoint %s', latest_checkpoint) initial_epoch = optimizer.iterations.numpy() // steps_per_epoch def compute_l2_loss(model): filtered_variables = [] for var in model.trainable_variables: # Apply l2 on the BN parameters and bias terms. This # excludes only fast weight approximate posterior/prior parameters, # but pay caution to their naming scheme. 
if ('kernel' in var.name or 'batch_norm' in var.name or 'bias' in var.name): filtered_variables.append(tf.reshape(var, (-1, ))) l2_loss = FLAGS.l2 * 2 * tf.nn.l2_loss( tf.concat(filtered_variables, axis=0)) return l2_loss @tf.function def train_step(iterator): """Training StepFn.""" def step_fn(inputs): """Per-Replica StepFn.""" images = inputs['features'] labels = inputs['labels'] if FLAGS.ensemble_size > 1: images = tf.tile(images, [FLAGS.ensemble_size, 1, 1, 1]) labels = tf.tile(labels, [FLAGS.ensemble_size]) with tf.GradientTape() as tape: logits = model(images, training=True) if FLAGS.use_bfloat16: logits = tf.cast(logits, tf.float32) probs = tf.nn.softmax(logits) if FLAGS.ensemble_size > 1: per_probs = tf.reshape( probs, tf.concat([[FLAGS.ensemble_size, -1], probs.shape[1:]], 0)) metrics['train/diversity'].add_batch(per_probs) negative_log_likelihood = tf.reduce_mean( tf.keras.losses.sparse_categorical_crossentropy( labels, logits, from_logits=True)) l2_loss = compute_l2_loss(model) kl = sum(model.losses) / APPROX_IMAGENET_TRAIN_IMAGES kl_scale = tf.cast(optimizer.iterations + 1, kl.dtype) kl_scale /= steps_per_epoch * FLAGS.kl_annealing_epochs kl_scale = tf.minimum(1., kl_scale) kl_loss = kl_scale * kl # Scale the loss given the TPUStrategy will reduce sum all gradients. loss = negative_log_likelihood + l2_loss + kl_loss scaled_loss = loss / strategy.num_replicas_in_sync elbo = -(negative_log_likelihood + l2_loss + kl) grads = tape.gradient(scaled_loss, model.trainable_variables) # Separate learning rate implementation. if FLAGS.fast_weight_lr_multiplier != 1.0: grads_and_vars = [] for grad, var in zip(grads, model.trainable_variables): # Apply different learning rate on the fast weights. This excludes BN # and slow weights, but pay caution to the naming scheme. if ('batch_norm' not in var.name and 'kernel' not in var.name): grads_and_vars.append( (grad * FLAGS.fast_weight_lr_multiplier, var)) else: grads_and_vars.append((grad, var)) optimizer.apply_gradients(grads_and_vars) else: optimizer.apply_gradients(zip(grads, model.trainable_variables)) metrics['train/negative_log_likelihood'].update_state( negative_log_likelihood) metrics['train/kl'].update_state(kl) metrics['train/kl_scale'].update_state(kl_scale) metrics['train/elbo'].update_state(elbo) metrics['train/loss'].update_state(loss) metrics['train/accuracy'].update_state(labels, logits) metrics['train/ece'].add_batch(probs, label=labels) for _ in tf.range(tf.cast(steps_per_epoch, tf.int32)): strategy.run(step_fn, args=(next(iterator), )) @tf.function def test_step(iterator, dataset_name): """Evaluation StepFn.""" def step_fn(inputs): """Per-Replica StepFn.""" images = inputs['features'] labels = inputs['labels'] if FLAGS.ensemble_size > 1: images = tf.tile(images, [FLAGS.ensemble_size, 1, 1, 1]) logits = tf.reshape([ model(images, training=False) for _ in range(FLAGS.num_eval_samples) ], [FLAGS.num_eval_samples, FLAGS.ensemble_size, -1, NUM_CLASSES]) if FLAGS.use_bfloat16: logits = tf.cast(logits, tf.float32) all_probs = tf.nn.softmax(logits) probs = tf.math.reduce_mean(all_probs, axis=[0, 1]) # marginalize # Negative log marginal likelihood computed in a numerically-stable way. 
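# Here the marginalization runs jointly over the S = num_eval_samples Monte
# Carlo draws and the K = ensemble_size members:
# log p(y|x) = logsumexp_{s,k} log p_{s,k}(y|x) - log(S * K).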
labels_broadcasted = tf.broadcast_to(labels, [ FLAGS.num_eval_samples, FLAGS.ensemble_size, tf.shape(labels)[0] ]) log_likelihoods = -tf.keras.losses.sparse_categorical_crossentropy( labels_broadcasted, logits, from_logits=True) negative_log_likelihood = tf.reduce_mean( -tf.reduce_logsumexp(log_likelihoods, axis=[0, 1]) + tf.math.log(float(FLAGS.num_eval_samples * FLAGS.ensemble_size))) l2_loss = compute_l2_loss(model) kl = sum(model.losses) / IMAGENET_VALIDATION_IMAGES elbo = -(negative_log_likelihood + l2_loss + kl)
if dataset_name == 'clean': if FLAGS.ensemble_size > 1: per_probs = tf.reduce_mean(all_probs, axis=0) # marginalize samples metrics['test/diversity'].add_batch(per_probs) for i in range(FLAGS.ensemble_size): member_probs = per_probs[i] member_loss = tf.keras.losses.sparse_categorical_crossentropy( labels, member_probs) metrics['test/nll_member_{}'.format(i)].update_state( member_loss) metrics['test/accuracy_member_{}'.format( i)].update_state(labels, member_probs) metrics['test/member_accuracy_mean'].update_state( labels, member_probs) metrics['test/member_ece_mean'].add_batch(member_probs, label=labels) metrics['test/negative_log_likelihood'].update_state( negative_log_likelihood) metrics['test/kl'].update_state(kl) metrics['test/elbo'].update_state(elbo) metrics['test/accuracy'].update_state(labels, probs) metrics['test/ece'].add_batch(probs, label=labels) else: corrupt_metrics['test/nll_{}'.format( dataset_name)].update_state(negative_log_likelihood) corrupt_metrics['test/kl_{}'.format( dataset_name)].update_state(kl) corrupt_metrics['test/elbo_{}'.format( dataset_name)].update_state(elbo) corrupt_metrics['test/accuracy_{}'.format( dataset_name)].update_state(labels, probs) corrupt_metrics['test/ece_{}'.format(dataset_name)].add_batch( probs, label=labels) for _ in tf.range(tf.cast(steps_per_eval, tf.int32)): strategy.run(step_fn, args=(next(iterator), ))
train_iterator = iter(train_dataset) start_time = time.time() for epoch in range(initial_epoch, FLAGS.train_epochs): logging.info('Starting to run epoch: %s', epoch) train_step(train_iterator) current_step = (epoch + 1) * steps_per_epoch max_steps = steps_per_epoch * FLAGS.train_epochs time_elapsed = time.time() - start_time steps_per_sec = float(current_step) / time_elapsed eta_seconds = (max_steps - current_step) / steps_per_sec message = ('{:.1%} completion: epoch {:d}/{:d}. {:.1f} steps/s. ' 'ETA: {:.0f} min. Time elapsed: {:.0f} min'.format( current_step / max_steps, epoch + 1, FLAGS.train_epochs, steps_per_sec, eta_seconds / 60, time_elapsed / 60)) logging.info(message)
datasets_to_evaluate = {'clean': test_datasets['clean']} if (FLAGS.corruptions_interval > 0 and (epoch + 1) % FLAGS.corruptions_interval == 0): datasets_to_evaluate = test_datasets for dataset_name, test_dataset in datasets_to_evaluate.items(): logging.info('Testing on dataset %s', dataset_name) test_iterator = iter(test_dataset) logging.info('Starting to run eval at epoch: %s', epoch) test_step(test_iterator, dataset_name) logging.info('Done with testing on %s', dataset_name)
corrupt_results = {} if (FLAGS.corruptions_interval > 0 and (epoch + 1) % FLAGS.corruptions_interval == 0): corrupt_results = utils.aggregate_corrupt_metrics( corrupt_metrics, corruption_types, max_intensity, FLAGS.alexnet_errors_path) logging.info('Train Loss: %.4f, Accuracy: %.2f%%', metrics['train/loss'].result(), metrics['train/accuracy'].result() * 100) logging.info('Test NLL: %.4f, Accuracy: %.2f%%', metrics['test/negative_log_likelihood'].result(), metrics['test/accuracy'].result() * 100) if FLAGS.ensemble_size > 1: for i in range(FLAGS.ensemble_size): logging.info( 'Member %d Test Loss: %.4f, Accuracy: %.2f%%', i, metrics['test/nll_member_{}'.format(i)].result(), metrics['test/accuracy_member_{}'.format(i)].result() * 100) total_results = { name: metric.result() for name, metric in metrics.items() } total_results.update(corrupt_results) # Results from Robustness Metrics themselves return a dict, so flatten them. total_results = utils.flatten_dictionary(total_results) with summary_writer.as_default(): for name, result in total_results.items(): tf.summary.scalar(name, result, step=epoch + 1) for metric in metrics.values(): metric.reset_states()
if (FLAGS.checkpoint_interval > 0 and (epoch + 1) % FLAGS.checkpoint_interval == 0): checkpoint_name = checkpoint.save( os.path.join(FLAGS.output_dir, 'checkpoint')) logging.info('Saved checkpoint to %s', checkpoint_name) final_checkpoint_name = checkpoint.save( os.path.join(FLAGS.output_dir, 'checkpoint')) logging.info('Saved last checkpoint to %s', final_checkpoint_name) with summary_writer.as_default(): hp.hparams({ 'base_learning_rate': FLAGS.base_learning_rate, 'one_minus_momentum': FLAGS.one_minus_momentum, 'l2': FLAGS.l2, 'fast_weight_lr_multiplier': FLAGS.fast_weight_lr_multiplier, 'num_eval_samples': FLAGS.num_eval_samples, })
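# A minimal sketch (hypothetical numbers) of the linear KL annealing used in
# the rank-1 BNN train_step above: the KL term's weight ramps from ~0 to 1
# over FLAGS.kl_annealing_epochs so the likelihood dominates early training.
import tensorflow as tf

def kl_annealing_scale(step, steps_per_epoch, kl_annealing_epochs):
  scale = tf.cast(step + 1, tf.float32) / float(
      steps_per_epoch * kl_annealing_epochs)
  return tf.minimum(1., scale)

# Halfway through annealing, the KL term carries half of its final weight:
print(kl_annealing_scale(tf.constant(4999), steps_per_epoch=100,
                         kl_annealing_epochs=100))  # ~0.5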