# Shared imports for the examples below. model_utils, imagenet_params, and
# FLAGS are assumed to come from the surrounding project (tf/absl flag
# definitions plus a project-level model_utils module). The tf/tf2 pair
# reflects the mixed TF1/TF2 APIs used here (tf.gfile, tf.logging,
# tf2.train.checkpoints_iterator).
import os
import time

import pandas as pd
import tensorflow.compat.v1 as tf
import tensorflow.compat.v2 as tf2


def read_all_eval_subdir(ckpt_dir, dataset_name, directory_path,
                         corruption_type, global_step, dest_dir, params):
    """Get metrics from many subdirectories."""

    split = ckpt_dir.split("/")
    pruning_method = split[11]

    # We pass the updated eval and train settings to the params dictionary.

    if pruning_method == "baseline":
        params["pruning_method"] = None
    else:
        params["pruning_method"] = pruning_method

    if dataset_name == "imagenet_c":
        directory_path = os.path.join(directory_path, corruption_type, "3.0.1")
        params["data_dir"] = os.path.join(directory_path, "imagenet2012_*")
    elif dataset_name == "imagenet_a":
        params["data_dir"] = os.path.join(directory_path, "validation*")
    else:
        raise ValueError("dataset not found")

    print(params["data_dir"])
    params["dataset"] = dataset_name
    params["task"] = "robustness_" + dataset_name
    params["train_dir"] = ckpt_dir
    params["is_training"] = False
    params["sloppy_shuffle"] = True

    eval_metrics = model_utils.initiate_task_helper(ckpt_directory=ckpt_dir,
                                                    model_params=params,
                                                    pruning_params=None)
    print(eval_metrics)

    df = pd.DataFrame.from_dict(eval_metrics, orient="index").reset_index()
    df["exp"] = split[8]
    df["split"] = split[9]
    df["pruning_method"] = split[11]
    df["fraction_pruned"] = split[12]
    df["start_pruning_step"] = split[13]
    df["end_pruning_step"] = split[14]
    df["pruning_frequency"] = split[15]
    df["global_step"] = global_step
    timestamp = str(time.time())
    if dataset_name == "imagenet_c":
        df["corruption"] = corruption_type
        df["corruption_intensity"] = int(corruption_type[-1])
        filename = "{}_{}_{}.csv".format(corruption_type, str(split[8]),
                                         timestamp)
    else:
        filename = "{}_{}.csv".format(str(split[8]), timestamp)

    if not tf.gfile.IsDirectory(dest_dir):
        tf.gfile.MakeDirs(dest_dir)

    output_path = os.path.join(dest_dir, filename)
    with tf.gfile.Open(output_path, "w") as f:
        tf.logging.info("Writing metrics to csv.")
        df.to_csv(f)
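
A minimal usage sketch, assuming a hypothetical checkpoint path whose
components encode the hyperparameters that the split("/") indexing above
expects (experiment at index 8, data split at 9, pruning settings at 11-15).
All paths, values, and the base_params name below are illustrative only:

# Hypothetical layout: seven arbitrary leading components, then
# <exp>/<split>/<model>/<pruning_method>/<fraction>/<start>/<end>/<frequency>.
ckpt_dir = ("/ckpts/user/imagenet/prune/runs/2020/01/"
            "exp1/eval/resnet50/threshold/0.9/10000/20000/500")
read_all_eval_subdir(
    ckpt_dir=ckpt_dir,
    dataset_name="imagenet_c",
    directory_path="/data/imagenet_c",  # hypothetical dataset root
    corruption_type="gaussian_noise_3",
    global_step=32000,
    dest_dir="/tmp/eval_csvs",
    params=dict(base_params))  # base_params: assumed project defaults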

Example #2

def predictions_from_checkpoint_dir(directory_path, filename, params,
                                    ckpt_directory, global_step):
    """Outputs predictions as a pandas dataframe.

  Args:
    directory_path: The path to the directory where dataset is stored.
    filename: The shard to retrieve predictions for.
    params: Dictionary of training and eval specific params.
    ckpt_directory: Path to the directory where checkpoint is stored.
    global_step: Training Step at which eval metrics were stored.

  Returns:
    When run on full dataset (test_small_sample=False) returns a pandas
    dataframe with predictions for all images on specified shard.

  Raises:
    ValueError when checkpoint is not stored in the correct format.
  """

    split = ckpt_directory.split("/")
    pruning_method = split[11]
    # Validate that the checkpoint path encodes a known pruning method.
    if pruning_method not in ["threshold", "baseline"]:
        raise ValueError("Pruning method is not known: %s" % pruning_method)

    # We pass the updated eval and train settings to the params dictionary.
    params["output_dir"] = ckpt_directory
    if pruning_method == "baseline":
        params["pruning_method"] = None
    else:
        params["pruning_method"] = pruning_method
    params["train_dir"] = ckpt_directory
    params["data_dir"] = directory_path
    params["split"] = filename
    params["is_training"] = False
    params["task"] = "imagenet_predictions"
    if FLAGS.test_small_sample:
        update_params = {
            "num_eval_images": 10,
        }
        params["test_small_sample"] = True
        params.update(update_params)
    else:
        params["test_small_sample"] = False

    predictions = model_utils.initiate_task_helper(
        ckpt_directory=ckpt_directory, model_params=params)
    if not FLAGS.test_small_sample:
        df = pd.DataFrame.from_records(list(predictions))
        df["exp"] = split[8]
        df["split"] = split[9]
        df["filename"] = filename
        df["pruning_method"] = split[11]
        df["fraction_pruned"] = split[12]
        df["start_pruning_step"] = split[13]
        df["end_pruning_step"] = split[14]
        df["pruning_frequency"] = split[15]
        df["global_step"] = global_step
        return df
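
A hedged sketch of collecting predictions over several shards, reusing the
hypothetical ckpt_dir from the example above; the shard names follow the
common "validation-XXXXX-of-XXXXX" TFRecord convention, which is an
assumption here:

frames = []
for shard in ["validation-00000-of-00128", "validation-00001-of-00128"]:
    shard_df = predictions_from_checkpoint_dir(
        directory_path="/data/imagenet",  # hypothetical dataset directory
        filename=shard,
        params=dict(base_params),  # base_params: assumed project defaults
        ckpt_directory=ckpt_dir,
        global_step=32000)
    # None is returned when FLAGS.test_small_sample is True.
    if shard_df is not None:
        frames.append(shard_df)
all_predictions = pd.concat(frames, ignore_index=True)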

Example #3

def tags_from_checkpoint_dir(ckpt_directory, params):
    """Gets metrics from an event file.

    Args:
      ckpt_directory: Model checkpoint directory containing the event file.
      params: Dictionary of params for model training and eval.

    Returns:
      pd.DataFrame containing metrics from the event file.
    """

    split = ckpt_directory.split("/")
    eval_metrics = model_utils.initiate_task_helper(
        model_params=params, ckpt_directory=ckpt_directory)
    df = pd.DataFrame.from_dict(eval_metrics, orient="index").reset_index()

    df["exp"] = split[10]
    df["pruning_method"] = split[11]
    df["fraction_pruned"] = split[12]
    df["start_pruning_step"] = split[13]
    df["end_pruning_step"] = split[14]
    df["pruning_frequency"] = split[15]
    df["ckpt"] = ckpt_directory
    return df
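
As above, a sketch only: aggregating event-file metrics across a hypothetical
list of checkpoint directories, each following the layout that the
split-indexing in tags_from_checkpoint_dir expects:

ckpt_dirs = [...]  # hypothetical list of checkpoint directories
metrics = pd.concat(
    [tags_from_checkpoint_dir(d, dict(base_params)) for d in ckpt_dirs],
    ignore_index=True)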

Example #4

def main(argv):
    del argv  # Unused.

    initial_sparsity = 0.0
    pruning_hparams_string = ('begin_pruning_step={0},'
                              'sparsity_function_begin_step={0},'
                              'end_pruning_step={1},'
                              'sparsity_function_end_step={1},'
                              'target_sparsity={2},'
                              'initial_sparsity={3},'
                              'pruning_frequency={4},'
                              'threshold_decay=0,'
                              'block_width={5},'
                              'block_height={6}'.format(
                                  FLAGS.sparsity_begin_step,
                                  FLAGS.sparsity_end_step, FLAGS.end_sparsity,
                                  initial_sparsity, FLAGS.pruning_frequency,
                                  FLAGS.block_width, FLAGS.block_height))
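    # The comma-separated string above appears to follow the hparams format
    # consumed by TensorFlow's model_pruning library (an assumption based on
    # the parameter names); it is passed as pruning_params to
    # initiate_task_helper in the training branch below.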

    params = imagenet_params  # Assumed project-level dict of default hparams.

    if FLAGS.test_small_sample:
        output_dir = '/tmp/imagenet_train_eval/'
    else:
        # Configure train directories based upon hyperparameters.
        if FLAGS.pruning_method:
            folder_stub = os.path.join(FLAGS.pruning_method,
                                       str(FLAGS.end_sparsity),
                                       str(FLAGS.sparsity_begin_step),
                                       str(FLAGS.sparsity_end_step))
        else:
            folder_stub = os.path.join('baseline', str(0.0), str(0.0),
                                       str(0.0), str(0.0),
                                       str(FLAGS.resnet_depth))
        # Shared by both branches; assigning this inside the baseline branch
        # would leave output_dir unbound whenever a pruning method is set.
        output_dir = os.path.join(FLAGS.output_dir, folder_stub)

    update_params = {
        'lr_schedule': [(1.0, 5), (0.1, 30), (0.01, 60), (0.001, 80)],
        'momentum': 0.9,
        'data_format': 'channels_last',
        'output_dir': output_dir,
        'label_smoothing': FLAGS.label_smoothing,
    }
    params.update(update_params)

    if FLAGS.pruning_method != 'baseline':
        params['pruning_method'] = FLAGS.pruning_method
    else:
        params['pruning_method'] = None

    params['mode'] = FLAGS.mode
    if FLAGS.mode == 'train':
        params['batch_size'] = params['train_batch_size']
        params['task'] = 'imagenet_training'
        params['data_dir'] = FLAGS.train_dir
    else:
        params['batch_size'] = params['eval_batch_size']
        params['task'] = 'imagenet_eval'
        params['data_dir'] = FLAGS.eval_dir

    if FLAGS.test_small_sample:
        update_params = {
            'batch_size': 2,
            'num_train_steps': 10,
            'num_images': 2,
            'num_train_images': 10,
            'num_eval_images': 10,
        }
        params['test_small_sample'] = True
        params.update(update_params)
    else:
        params['test_small_sample'] = False

    if FLAGS.mode == 'eval':
        # Run evaluation whenever checkpoints_iterator yields a new
        # checkpoint; the iterator blocks until one appears.
        for ckpt in tf2.train.checkpoints_iterator(params['output_dir']):
            tf.logging.info('Starting to evaluate.')
            try:
                _ = model_utils.initiate_task_helper(ckpt_directory=ckpt,
                                                     model_params=params,
                                                     pruning_params=None)
                current_step = int(os.path.basename(ckpt).split('-')[1])
                if current_step >= params['num_train_steps']:
                    tf.logging.info('Evaluation finished')
                    break
            except tf.errors.NotFoundError:
                tf.logging.info(
                    'Checkpoint was not found, skipping checkpoint.')

    elif FLAGS.mode == 'train':
        tf.logging.info('start training...')
        model_utils.initiate_task_helper(
            ckpt_directory=None,
            model_params=params,
            pruning_params=pruning_hparams_string)
        tf.logging.info('finished training.')
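
Since main(argv) discards argv, the script is presumably dispatched through
tf.app.run (or absl's app.run); a minimal sketch of the entry point, plus an
illustrative invocation in which the script name and all flag values are
hypothetical:

if __name__ == '__main__':
    tf.logging.set_verbosity(tf.logging.INFO)
    tf.app.run(main)

# Illustrative invocation (all values are examples only):
# python imagenet_train_eval.py --mode=train --pruning_method=threshold \
#     --end_sparsity=0.9 --sparsity_begin_step=10000 \
#     --sparsity_end_step=20000 --pruning_frequency=500 \
#     --block_width=1 --block_height=1 \
#     --output_dir=/tmp/ckpts --train_dir=/path/to/imagenet/train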