def evaluate():
  """Extract embeddings."""
  logdir = FLAGS.logdir
  setup_eval_dir(logdir)
  # Can ignore frame labels if dataset doesn't have per-frame labels.
  CONFIG.DATA.FRAME_LABELS = FLAGS.keep_labels
  # Subsample frames in case videos are long or fps is high to save memory.
  CONFIG.DATA.SAMPLE_ALL_STRIDE = FLAGS.sample_all_stride

  algo = get_algo(CONFIG.TRAINING_ALGO)
  _, optimizer, _ = get_lr_opt_global_step()
  restore_ckpt(logdir=logdir, optimizer=optimizer, **algo.model)

  if FLAGS.defun:
    algo.call = tf.function(algo.call)
    algo.compute_loss = tf.function(algo.compute_loss)

  iterator = create_one_epoch_dataset(FLAGS.dataset, FLAGS.split, mode='eval',
                                      path_to_tfrecords=FLAGS.path_to_tfrecords)

  max_embs = None if FLAGS.max_embs <= 0 else FLAGS.max_embs
  embeddings = get_embeddings_dataset(
      algo.model,
      iterator,
      frames_per_batch=FLAGS.frames_per_batch,
      keep_data=FLAGS.keep_data,
      keep_labels=FLAGS.keep_labels,
      max_embs=max_embs)
  np.save(gfile.Open(FLAGS.save_path, 'w'), embeddings)
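
# Illustrative companion (not part of the original source): a minimal sketch
# of how the object written by evaluate() above can be read back. The helper
# name `load_saved_embeddings` is hypothetical; only the np.save/np.load
# behavior is standard NumPy. It assumes get_embeddings_dataset() returned a
# picklable Python object (e.g. a dict of per-video embeddings): np.save
# wraps such an object in a 0-d object array, so allow_pickle=True and
# .item() are needed to unwrap it.
def load_saved_embeddings(save_path):
  """Reload the embeddings object written by evaluate() (illustrative)."""
  with gfile.Open(save_path, 'rb') as f:
    return np.load(f, allow_pickle=True).item()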
def evaluate_once(algo, iterator_tasks, embedding_tasks, iterators,
                  summary_writer):
  """Evaluate learnt embeddings on downstream tasks."""
  # Restores model and optimizer state from the latest checkpoint.
  _, optimizer, global_step = get_lr_opt_global_step()
  restore_ckpt(logdir=CONFIG.LOGDIR, optimizer=optimizer, **algo.model)

  if global_step.numpy() == CONFIG.TRAIN.MAX_ITERS:
    global evaluated_last_ckpt
    evaluated_last_ckpt = True

  metrics = {}
  if iterator_tasks:
    with summary_writer.as_default():
      with tf.summary.record_if(True):
        for task_name, task in iterator_tasks.items():
          metrics[task_name] = task.evaluate(algo, global_step,
                                             iterators=iterators)

  max_embs = None if FLAGS.max_embs <= 0 else FLAGS.max_embs
  if embedding_tasks:
    frames_per_batch = CONFIG.EVAL.FRAMES_PER_BATCH
    for dataset_name in CONFIG.DATASETS:
      dataset = {'name': dataset_name}

      train_iterator = create_one_epoch_dataset(
          dataset_name, 'train', mode='eval',
          path_to_tfrecords=CONFIG.PATH_TO_TFRECORDS)
      dataset['train_dataset'] = get_embeddings_dataset(
          algo.model,
          train_iterator,
          frames_per_batch=frames_per_batch,
          max_embs=max_embs)

      val_iterator = create_one_epoch_dataset(
          dataset_name, 'val', mode='eval',
          path_to_tfrecords=CONFIG.PATH_TO_TFRECORDS)
      dataset['val_dataset'] = get_embeddings_dataset(
          algo.model,
          val_iterator,
          frames_per_batch=frames_per_batch,
          max_embs=max_embs)

      with summary_writer.as_default():
        with tf.summary.record_if(True):
          for task_name, task in embedding_tasks.items():
            if task_name not in metrics:
              metrics[task_name] = {}
            metrics[task_name][dataset_name] = task.evaluate(
                algo, global_step, embeddings_dataset=dataset)

    # Add all metrics in a separate tag so that analysis is easier.
    with summary_writer.as_default():
      with tf.summary.record_if(True):
        for task_name in embedding_tasks.keys():
          for dataset in CONFIG.DATASETS:
            tf.summary.scalar('metrics/%s_%s' % (dataset, task_name),
                              metrics[task_name][dataset],
                              step=global_step)
          avg_metric = sum(metrics[task_name].values())
          avg_metric /= len(CONFIG.DATASETS)
          tf.summary.scalar('metrics/all_%s' % task_name, avg_metric,
                            step=global_step)
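
# Hypothetical driver (an assumption, not taken from this file) sketching how
# evaluate_once() might be invoked each time a new checkpoint lands in
# CONFIG.LOGDIR. tf.train.checkpoints_iterator is standard TensorFlow; the
# `evaluate_continuously` name and the 60-second timeout are illustrative
# choices, as is reading the module-level `evaluated_last_ckpt` flag that
# evaluate_once() sets after evaluating the final checkpoint.
def evaluate_continuously(algo, iterator_tasks, embedding_tasks, iterators):
  """Run evaluate_once() on every new checkpoint (illustrative)."""
  summary_writer = tf.summary.create_file_writer(CONFIG.LOGDIR)
  # Blocks until a new checkpoint appears; stops if none arrives in time.
  for _ in tf.train.checkpoints_iterator(CONFIG.LOGDIR, timeout=60):
    evaluate_once(algo, iterator_tasks, embedding_tasks, iterators,
                  summary_writer)
    # Set when global_step == CONFIG.TRAIN.MAX_ITERS in evaluate_once().
    if evaluated_last_ckpt:
      break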