Example #1
def main(unused_argv):
  os.environ['CUDA_VISIBLE_DEVICES'] = FLAGS.gpu
  if FLAGS.clear:
    if os.path.exists(FLAGS.eval_dir):
      shutil.rmtree(FLAGS.eval_dir)

  assert FLAGS.checkpoint_dir, '`checkpoint_dir` is missing.'
  assert FLAGS.eval_dir, '`eval_dir` is missing.'
  if FLAGS.pipeline_config_path:
    model_config, eval_config, input_config = get_configs_from_pipeline_file()
  else:
    model_config, eval_config, input_config = get_configs_from_multiple_files()

  model_fn = functools.partial(
      model_builder.build,
      model_config=model_config,
      is_training=False)

  create_input_dict_fn = functools.partial(
      input_reader_builder.build,
      input_config)

  label_map = label_map_util.load_labelmap(input_config.label_map_path)
  max_num_classes = max([item.id for item in label_map.item])
  categories = label_map_util.convert_label_map_to_categories(
      label_map, max_num_classes)

  evaluator.evaluate(create_input_dict_fn, model_fn, eval_config, categories,
                     FLAGS.checkpoint_dir, FLAGS.eval_dir)
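All of these snippets read options from a module-level FLAGS object. As a point of reference, here is a minimal sketch of the TF1-style flag definitions they assume; the names come from the examples themselves (and the docstring in Example #20), while defaults and help strings are illustrative:

import tensorflow as tf

# Minimal sketch of the flag definitions assumed by these eval scripts (TF1 style).
# Flag names are taken from the examples above; defaults are illustrative, and
# individual examples add extra flags such as `gpu`, `clear`, or `mini_batch`.
flags = tf.app.flags
flags.DEFINE_string('checkpoint_dir', '', 'Directory containing checkpoints to evaluate.')
flags.DEFINE_string('eval_dir', '', 'Directory to write eval summaries to.')
flags.DEFINE_string('pipeline_config_path', '', 'Path to a pipeline_pb2.TrainEvalPipelineConfig file.')
flags.DEFINE_string('model_config_path', '', 'Path to a model_pb2.DetectionModel config file.')
flags.DEFINE_string('eval_config_path', '', 'Path to an eval_pb2.EvalConfig config file.')
flags.DEFINE_string('input_config_path', '', 'Path to an input_reader_pb2.InputReader config file.')
flags.DEFINE_boolean('eval_training_data', False, 'Whether to evaluate on training data.')
flags.DEFINE_boolean('run_once', False, 'Run a single evaluation pass.')
FLAGS = flags.FLAGS

if __name__ == '__main__':
  tf.app.run()  # parses flags, then calls main(unused_argv)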
Example #2
def main(unused_argv):
  assert FLAGS.checkpoint_dir, '`checkpoint_dir` is missing.'
  assert FLAGS.eval_dir, '`eval_dir` is missing.'
  tf.gfile.MakeDirs(FLAGS.eval_dir)
  if FLAGS.pipeline_config_path:
    configs = config_util.get_configs_from_pipeline_file(
        FLAGS.pipeline_config_path)
    tf.gfile.Copy(FLAGS.pipeline_config_path,
                  os.path.join(FLAGS.eval_dir, 'pipeline.config'),
                  overwrite=True)
  else:
    configs = config_util.get_configs_from_multiple_files(
        model_config_path=FLAGS.model_config_path,
        eval_config_path=FLAGS.eval_config_path,
        eval_input_config_path=FLAGS.input_config_path)
    for name, config in [('model.config', FLAGS.model_config_path),
                         ('eval.config', FLAGS.eval_config_path),
                         ('input.config', FLAGS.input_config_path)]:
      tf.gfile.Copy(config,
                    os.path.join(FLAGS.eval_dir, name),
                    overwrite=True)

  model_config = configs['model']
  eval_config = configs['eval_config']
  input_config = configs['eval_input_config']
  if FLAGS.eval_training_data:
    input_config = configs['train_input_config']

  model_fn = functools.partial(
      model_builder.build,
      model_config=model_config,
      is_training=False)

  def get_next(config):
    return dataset_util.make_initializable_iterator(
        dataset_builder.build(config)).get_next()

  create_input_dict_fn = functools.partial(get_next, input_config)

  label_map = label_map_util.load_labelmap(input_config.label_map_path)
  max_num_classes = max([item.id for item in label_map.item])
  categories = label_map_util.convert_label_map_to_categories(
      label_map, max_num_classes)

  if FLAGS.run_once:
    eval_config.max_evals = 1

  graph_rewriter_fn = None
  if 'graph_rewriter_config' in configs:
    graph_rewriter_fn = graph_rewriter_builder.build(
        configs['graph_rewriter_config'], is_training=False)

  evaluator.evaluate(
      create_input_dict_fn,
      model_fn,
      eval_config,
      categories,
      FLAGS.checkpoint_dir,
      FLAGS.eval_dir,
      graph_hook_fn=graph_rewriter_fn)
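Starting with this example, the pipeline config is read through `config_util`, which returns a plain dict. A sketch of the keys these snippets rely on (inferred from the lookups in this section; not exhaustive):

configs = config_util.get_configs_from_pipeline_file('pipeline.config')
model_config = configs['model']                      # model_pb2.DetectionModel
eval_config = configs['eval_config']                 # eval_pb2.EvalConfig
eval_input_config = configs['eval_input_config']     # input_reader_pb2.InputReader
train_input_config = configs['train_input_config']   # input_reader_pb2.InputReader
graph_rewriter_config = configs.get('graph_rewriter_config')  # optional (quantization etc.)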
Example #3
def main(unused_argv):
    assert FLAGS.checkpoint_dir, '`checkpoint_dir` is missing.'
    assert FLAGS.eval_dir, '`eval_dir` is missing.'
    if FLAGS.pipeline_config_path:
        model_config, eval_config, input_config = get_configs_from_pipeline_file()
    else:
        model_config, eval_config, input_config = get_configs_from_multiple_files()

    model_fn = functools.partial(model_builder.build,
                                 model_config=model_config,
                                 is_training=False)

    create_input_dict_fn = functools.partial(input_reader_builder.build,
                                             input_config)

    label_map = label_map_util.load_labelmap(input_config.label_map_path)
    max_num_classes = max([item.id for item in label_map.item])
    categories = label_map_util.convert_label_map_to_categories(
        label_map, max_num_classes)

    mitosis_map = [0, 0]
    mitosis_class = 'mitosis'

    evaluator.evaluate(FLAGS.training_process, mitosis_map, mitosis_class,
                       create_input_dict_fn, model_fn, eval_config, categories,
                       FLAGS.checkpoint_dir, FLAGS.eval_dir)
Example #4
def main(unused_argv):
    assert FLAGS.checkpoint_dir, '`checkpoint_dir` is missing.'
    assert FLAGS.eval_dir, '`eval_dir` is missing.'
    if FLAGS.pipeline_config_path:
        model_config, eval_config, input_config = get_configs_from_pipeline_file()
    else:
        model_config, eval_config, input_config = get_configs_from_multiple_files()

    if not os.path.exists(eval_config.visualization_export_dir):
        print(eval_config.visualization_export_dir)
        os.makedirs(eval_config.visualization_export_dir)

    model_fn = functools.partial(model_builder.build,
                                 model_config=model_config,
                                 is_training=False)

    create_input_dict_fn = functools.partial(input_reader_builder.build,
                                             input_config)

    label_map = label_map_util.load_labelmap(input_config.label_map_path)
    max_num_classes = max([item.id for item in label_map.item])
    categories = label_map_util.convert_label_map_to_categories(
        label_map, max_num_classes)

    evaluator.evaluate(create_input_dict_fn, model_fn, eval_config, categories,
                       FLAGS.checkpoint_dir, FLAGS.eval_dir)
Example #5
def main(unused_argv):
    assert FLAGS.checkpoint_dir, '`checkpoint_dir` is missing.'
    assert FLAGS.eval_dir, '`eval_dir` is missing.'
    if FLAGS.run_on_CPU_only:
        os.environ['CUDA_VISIBLE_DEVICES'] = '-1'
    tf.gfile.MakeDirs(FLAGS.eval_dir)
    if FLAGS.pipeline_config_path:
        configs = config_util.get_configs_from_pipeline_file(
            FLAGS.pipeline_config_path)
        tf.gfile.Copy(FLAGS.pipeline_config_path,
                      os.path.join(FLAGS.eval_dir, 'pipeline.config'),
                      overwrite=True)
    else:
        configs = config_util.get_configs_from_multiple_files(
            model_config_path=FLAGS.model_config_path,
            eval_config_path=FLAGS.eval_config_path,
            eval_input_config_path=FLAGS.input_config_path)
        for name, config in [('model.config', FLAGS.model_config_path),
                             ('eval.config', FLAGS.eval_config_path),
                             ('input.config', FLAGS.input_config_path)]:
            tf.gfile.Copy(config,
                          os.path.join(FLAGS.eval_dir, name),
                          overwrite=True)

    model_config = configs['model']
    eval_config = configs['eval_config']
    input_config = configs['eval_input_config']
    if FLAGS.eval_training_data:
        input_config = configs['train_input_config']

    model_fn = functools.partial(model_builder.build,
                                 model_config=model_config,
                                 is_training=False)

    def get_next(config):
        return dataset_util.make_initializable_iterator(
            dataset_builder.build(config)).get_next()

    create_input_dict_fn = functools.partial(get_next, input_config)

    label_map = label_map_util.load_labelmap(input_config.label_map_path)
    max_num_classes = max([item.id for item in label_map.item])
    categories = label_map_util.convert_label_map_to_categories(
        label_map, max_num_classes)

    if FLAGS.run_once:
        eval_config.max_evals = 1

    graph_rewriter_fn = None
    if 'graph_rewriter_config' in configs:
        graph_rewriter_fn = graph_rewriter_builder.build(
            configs['graph_rewriter_config'], is_training=False)

    evaluator.evaluate(create_input_dict_fn,
                       model_fn,
                       eval_config,
                       categories,
                       FLAGS.checkpoint_dir,
                       FLAGS.eval_dir,
                       graph_hook_fn=graph_rewriter_fn)
Example #6
def main(unused_argv):
    assert FLAGS.checkpoint_dir, '`checkpoint_dir` is missing.'
    assert FLAGS.eval_dir, '`eval_dir` is missing.'
    tf.gfile.MakeDirs(FLAGS.eval_dir)
    if FLAGS.pipeline_config_path:
        configs = config_util.get_configs_from_pipeline_file(
            FLAGS.pipeline_config_path)
        tf.gfile.Copy(FLAGS.pipeline_config_path,
                      os.path.join(FLAGS.eval_dir, 'pipeline.config'),
                      overwrite=True)
    else:
        configs = config_util.get_configs_from_multiple_files(
            model_config_path=FLAGS.model_config_path,
            eval_config_path=FLAGS.eval_config_path,
            eval_input_config_path=FLAGS.input_config_path)
        for name, config in [('model.config', FLAGS.model_config_path),
                             ('eval.config', FLAGS.eval_config_path),
                             ('input.config', FLAGS.input_config_path)]:
            tf.gfile.Copy(config,
                          os.path.join(FLAGS.eval_dir, name),
                          overwrite=True)

    model_config = configs['model']
    eval_config = configs['eval_config']
    input_config = configs['eval_input_config']

    model_fn = functools.partial(model_builder.build,
                                 model_config=model_config,
                                 is_training=False)

    create_input_dict_fn = functools.partial(input_reader_builder.build,
                                             input_config)

    label_map = label_map_util.load_labelmap(input_config.label_map_path)
    max_num_classes = max([item.id for item in label_map.item])
    categories = label_map_util.convert_label_map_to_categories(
        label_map, max_num_classes)

    if FLAGS.run_once:
        eval_config.max_evals = 1

    #evaluator.evaluate(create_input_dict_fn, model_fn, eval_config, categories,
    #                   FLAGS.checkpoint_dir, FLAGS.eval_dir)
    evaluator.evaluate(FLAGS.report_filename, create_input_dict_fn, model_fn,
                       eval_config, categories, FLAGS.checkpoint_dir,
                       FLAGS.eval_dir)

    # Write the sorted report and the best-model statistics next to the report.
    df = pd.read_csv(FLAGS.report_filename)
    best_row = df.loc[df['mean_ap'].idxmax()]
    df_sorted = df.sort_values('model-iter')
    dir_path = os.path.dirname(FLAGS.report_filename)
    base_name = os.path.basename(FLAGS.report_filename)
    df_sorted.to_csv(os.path.join(dir_path, 'sorted-' + base_name))
    best_row.to_csv(os.path.join(dir_path, 'best-' + base_name))
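
To illustrate the report post-processing above, here is what the pandas calls do on a hypothetical report (column names taken from the snippet, values invented):

import pandas as pd

report = pd.DataFrame({'model-iter': [30000, 10000, 20000],
                       'mean_ap': [0.71, 0.55, 0.68]})
print(report.loc[report['mean_ap'].idxmax()])  # row with the highest mAP
print(report.sort_values('model-iter'))        # rows ordered by checkpoint step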
Example #7
def main(unused_argv):
    assert FLAGS.checkpoint_dir, '`checkpoint_dir` is missing.'
    assert FLAGS.eval_dir, '`eval_dir` is missing.'
    tf.gfile.MakeDirs(FLAGS.eval_dir)
    if FLAGS.pipeline_config_path:
        configs = config_util.get_configs_from_pipeline_file(
            FLAGS.pipeline_config_path)
        tf.gfile.Copy(FLAGS.pipeline_config_path,
                      os.path.join(FLAGS.eval_dir, 'pipeline.config'),
                      overwrite=True)
    else:
        configs = config_util.get_configs_from_multiple_files(
            model_config_path=FLAGS.model_config_path,
            eval_config_path=FLAGS.eval_config_path,
            eval_input_config_path=FLAGS.input_config_path)
        for name, config in [('model.config', FLAGS.model_config_path),
                             ('eval.config', FLAGS.eval_config_path),
                             ('input.config', FLAGS.input_config_path)]:
            tf.gfile.Copy(config,
                          os.path.join(FLAGS.eval_dir, name),
                          overwrite=True)

    model_config = configs['model']
    eval_config = configs['eval_config']
    input_config = configs['eval_input_config']
    if FLAGS.eval_training_data:
        input_config = configs['train_input_config']

    if len(FLAGS.convDict_path) != 0:
        convDict = pickle.load(open(FLAGS.convDict_path, 'rb'))
    else:
        convDict = None

    model_fn = functools.partial(model_builder.build,
                                 model_config=model_config,
                                 is_training=False,
                                 add_summaries=True,
                                 convDict=convDict)

    def get_next(config):
        return dataset_util.make_initializable_iterator(
            dataset_builder.build(config)).get_next()

    create_input_dict_fn = functools.partial(get_next, input_config)

    label_map = label_map_util.load_labelmap(input_config.label_map_path)
    max_num_classes = max([item.id for item in label_map.item])
    categories = label_map_util.convert_label_map_to_categories(
        label_map, max_num_classes)

    if FLAGS.run_once:
        eval_config.max_evals = 1

    evaluator.evaluate(create_input_dict_fn, model_fn, eval_config, categories,
                       FLAGS.checkpoint_dir, FLAGS.eval_dir)
Example #8
def main(unused_argv):
    assert FLAGS.checkpoint_dir, '`checkpoint_dir` is missing.'
    assert FLAGS.eval_dir, '`eval_dir` is missing.'
    tf.gfile.MakeDirs(FLAGS.eval_dir)
    if FLAGS.pipeline_config_path:
        configs = config_util.get_configs_from_pipeline_file(
            FLAGS.pipeline_config_path)
        tf.gfile.Copy(FLAGS.pipeline_config_path,
                      os.path.join(FLAGS.eval_dir, 'pipeline.config'),
                      overwrite=True)
    else:
        configs = config_util.get_configs_from_multiple_files(
            model_config_path=FLAGS.model_config_path,
            eval_config_path=FLAGS.eval_config_path,
            eval_input_config_path=FLAGS.input_config_path)
        for name, config in [('model.config', FLAGS.model_config_path),
                             ('eval.config', FLAGS.eval_config_path),
                             ('input.config', FLAGS.input_config_path)]:
            tf.gfile.Copy(config,
                          os.path.join(FLAGS.eval_dir, name),
                          overwrite=True)

    model_config = configs['model']
    eval_config = configs['eval_config']
    if FLAGS.eval_training_data:
        input_config = configs['train_input_config']
    else:
        input_config = configs['eval_input_config']

    model_fn = functools.partial(model_builder.build,
                                 model_config=model_config,
                                 is_training=False)

    create_input_dict_fn = functools.partial(input_reader_builder.build,
                                             input_config)

    label_map = label_map_util.load_labelmap(input_config.label_map_path)
    print(label_map)
    # The class count is hard-coded here rather than derived from the label map;
    # the original derivation is kept commented out for reference.
    # max_num_classes = max([item.id for item in label_map.item])
    max_num_classes = 764

    categories = label_map_util.convert_label_map_to_categories(
        label_map, max_num_classes)

    if FLAGS.run_once:
        eval_config.max_evals = 1

    evaluator.evaluate(create_input_dict_fn, model_fn, eval_config, categories,
                       FLAGS.checkpoint_dir, FLAGS.eval_dir)
Example #9
def evaluate(self, eval_pipeline_file, model_dir, eval_dir):
    configs = self._get_configs_from_pipeline_file(eval_pipeline_file)
    model_config = configs['model']
    eval_config = configs['eval_config']
    input_config = configs['eval_input_config']
    model_fn = functools.partial(model_builder.build,
                                 model_config=model_config,
                                 is_training=True)
    create_input_dict_fn = functools.partial(self.get_next, input_config)
    label_map = label_map_util.load_labelmap(input_config.label_map_path)
    max_num_classes = max([item.id for item in label_map.item])
    categories = label_map_util.convert_label_map_to_categories(
        label_map, max_num_classes)
    evaluator.evaluate(create_input_dict_fn, model_fn, eval_config,
                       categories, model_dir, eval_dir)
Example #10
def main(unused_argv):
    assert FLAGS.train_dir, '`train_dir` is missing.'

    eval_dir = os.path.join(FLAGS.train_dir, 'eval_logs')
    ckpt_dir = os.path.join(FLAGS.train_dir, 'train_logs')

    tf.gfile.MakeDirs(eval_dir)

    pipeline_config_path = os.path.join(FLAGS.train_dir, 'model.config')
    configs = config_util.get_configs_from_pipeline_file(pipeline_config_path)
    tf.gfile.Copy(pipeline_config_path,
                  os.path.join(eval_dir, 'pipeline.config'),
                  overwrite=True)

    model_config = configs['model']
    eval_config = configs['eval_config']
    input_config = configs['eval_input_config']

    model_fn = functools.partial(model_builder.build,
                                 model_config=model_config,
                                 is_training=False)

    def get_next(config):
        return dataset_util.make_initializable_iterator(
            dataset_builder.build(config)).get_next()

    create_input_dict_fn = functools.partial(get_next, input_config)

    label_map = label_map_util.load_labelmap(input_config.label_map_path)
    max_num_classes = max([item.id for item in label_map.item])
    categories = label_map_util.convert_label_map_to_categories(
        label_map, max_num_classes)

    if FLAGS.run_once:
        eval_config.max_evals = 1

    best_eval_metrics = {'best_mAP': 0.0, 'best_tot_loss': 999.9}

    evaluator.evaluate(create_input_dict_fn,
                       model_fn,
                       eval_config,
                       categories,
                       ckpt_dir,
                       eval_dir,
                       best_eval_metrics=best_eval_metrics)
Example #11
def eval_main(max_number_of_evaluations=None):
    if FLAGS.eval_label:
        if FLAGS.pipeline_config_path == '':
            FLAGS.pipeline_config_path = '../configs/test/' + FLAGS.eval_label + '.config'
        if FLAGS.checkpoint_dir == '':
            FLAGS.checkpoint_dir = '../checkpoints/train/' + FLAGS.eval_label
        FLAGS.eval_dir = '../checkpoints/eval/' + FLAGS.eval_label
        FLAGS.eval_tag = FLAGS.eval_label

    assert FLAGS.checkpoint_dir, '`checkpoint_dir` is missing.'

    if FLAGS.pipeline_config_path:
        model_config, eval_config, input_config = get_configs_from_pipeline_file()
    elif FLAGS.eval_config_path:
        model_config, eval_config, input_config = get_configs_from_multiple_files()
    else:
        model_config, eval_config, input_config = get_configs_from_checkpoint_dir()

    if not FLAGS.eval_dir:
        if not FLAGS.eval_tag:
            FLAGS.eval_tag = time.strftime("%Y%m%d-%H%M%S")
        FLAGS.eval_dir = utils.mkdir_p(FLAGS.checkpoint_dir + '_eval_' +
                                       FLAGS.eval_tag)

    model_fn = functools.partial(model_builder.build,
                                 model_config=model_config,
                                 is_training=False)

    create_input_dict_fn = functools.partial(input_reader_builder.build,
                                             input_config)

    input_path = input_config.tf_record_input_reader.input_path
    num_examples = sum(1 for _ in tf.python_io.tf_record_iterator(input_path))

    label_map = label_map_util.load_labelmap(input_config.label_map_path)
    max_num_classes = max([item.id for item in label_map.item])
    categories = label_map_util.convert_label_map_to_categories(
        label_map, max_num_classes)

    evaluator.evaluate(create_input_dict_fn, model_fn, eval_config, categories,
                       FLAGS.checkpoint_dir, FLAGS.eval_dir, num_examples,
                       FLAGS.gpu_fraction, max_number_of_evaluations)
Example #12
def main(unused_argv):
    assert FLAGS.checkpoint_dir, '`checkpoint_dir` is missing.'
    assert FLAGS.eval_dir, '`eval_dir` is missing.'
    tf.gfile.MakeDirs(FLAGS.eval_dir)
    if FLAGS.pipeline_config_path:
        configs = config_util.get_configs_from_pipeline_file(
            FLAGS.pipeline_config_path)
        tf.gfile.Copy(FLAGS.pipeline_config_path,
                      os.path.join(FLAGS.eval_dir, 'pipeline.config'),
                      overwrite=True)
    else:
        configs = config_util.get_configs_from_multiple_files(
            model_config_path=FLAGS.model_config_path,
            eval_config_path=FLAGS.eval_config_path,
            eval_input_config_path=FLAGS.input_config_path)
        for name, config in [('model.config', FLAGS.model_config_path),
                             ('eval.config', FLAGS.eval_config_path),
                             ('input.config', FLAGS.input_config_path)]:
            tf.gfile.Copy(config,
                          os.path.join(FLAGS.eval_dir, name),
                          overwrite=True)

    model_config = configs['model']
    eval_config = configs['eval_config']
    if FLAGS.eval_training_data:
        input_config = configs['train_input_config']
    else:
        input_config = configs['eval_input_config']

    model_fn = functools.partial(model_builder.build,
                                 model_config=model_config,
                                 is_training=False)

    create_input_dict_fn = functools.partial(input_reader_builder.build,
                                             input_config)

    categories = [{'id': 1, 'name': 'Daisy'}]

    if FLAGS.run_once:
        eval_config.max_evals = 1

    evaluator.evaluate(create_input_dict_fn, model_fn, eval_config, categories,
                       FLAGS.checkpoint_dir, FLAGS.eval_dir)
    print "Hello WOrld!"
Example #13
def run_eval(checkpoint_dir, eval_dir, pipeline_config_path, num_examples):
    run_once = False
    assert checkpoint_dir, '`checkpoint_dir` is missing.'
    assert eval_dir, '`eval_dir` is missing.'
    tf.gfile.MakeDirs(eval_dir)
    if pipeline_config_path:
        configs = config_util.get_configs_from_pipeline_file(
            pipeline_config_path)
        tf.gfile.Copy(pipeline_config_path,
                      os.path.join(eval_dir, 'pipeline.config'),
                      overwrite=True)
    else:
        return

    model_config = configs['model']
    eval_config = configs['eval_config']
    input_config = configs['eval_input_config']
    # if eval_training_data:
    #   input_config = configs['train_input_config']

    eval_config.num_examples = num_examples
    eval_config.max_evals = 1

    model_fn = functools.partial(model_builder.build,
                                 model_config=model_config,
                                 is_training=False)

    def get_next(config):
        return dataset_builder.make_initializable_iterator(
            dataset_builder.build(config)).get_next()

    create_input_dict_fn = functools.partial(get_next, input_config)

    label_map = label_map_util.load_labelmap(input_config.label_map_path)
    max_num_classes = max([item.id for item in label_map.item])
    categories = label_map_util.convert_label_map_to_categories(
        label_map, max_num_classes)

    if run_once:
        eval_config.max_evals = 1

    graph_rewriter_fn = None
    if 'graph_rewriter_config' in configs:
        graph_rewriter_fn = graph_rewriter_builder.build(
            configs['graph_rewriter_config'], is_training=False)

    result = evaluator.evaluate(create_input_dict_fn,
                                model_fn,
                                eval_config,
                                categories,
                                checkpoint_dir,
                                eval_dir,
                                graph_hook_fn=graph_rewriter_fn)

    return result
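
A hypothetical invocation of `run_eval`, since this is the only example here written as a reusable function that returns the metrics dict (paths are placeholders):

metrics = run_eval(checkpoint_dir='train/',
                   eval_dir='eval/',
                   pipeline_config_path='pipeline.config',
                   num_examples=500)
print(metrics)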
Example #14
def main(unused_argv):
    assert FLAGS.checkpoint_dir, '`checkpoint_dir` is missing.'
    assert FLAGS.eval_dir, '`eval_dir` is missing.'
    tf.gfile.MakeDirs(FLAGS.eval_dir)
    if FLAGS.pipeline_config_path:
        configs = config_util.get_configs_from_pipeline_file(
            FLAGS.pipeline_config_path)
        tf.gfile.Copy(FLAGS.pipeline_config_path,
                      os.path.join(FLAGS.eval_dir, 'pipeline.config'),
                      overwrite=True)
    else:
        configs = config_util.get_configs_from_multiple_files(
            model_config_path=FLAGS.model_config_path,
            eval_config_path=FLAGS.eval_config_path,
            eval_input_config_path=FLAGS.input_config_path)
        for name, config in [('model.config', FLAGS.model_config_path),
                             ('eval.config', FLAGS.eval_config_path),
                             ('input.config', FLAGS.input_config_path)]:
            tf.gfile.Copy(config,
                          os.path.join(FLAGS.eval_dir, name),
                          overwrite=True)

    model_config = configs['model']
    eval_config = configs['eval_config']
    input_config = configs['eval_input_config']

    if FLAGS.mini_batch:
        input_config.shuffle = True

    model_fn = functools.partial(model_builder.build,
                                 model_config=model_config,
                                 is_training=False)

    create_input_dict_fn = functools.partial(input_reader_builder.build,
                                             input_config)

    label_map = label_map_util.load_labelmap(input_config.label_map_path)
    max_num_classes = max([item.id for item in label_map.item])
    categories = label_map_util.convert_label_map_to_categories(
        label_map, max_num_classes)

    if FLAGS.run_once:
        eval_config.max_evals = 1

    if FLAGS.mini_batch:
        eval_config.max_evals = 1
        eval_config.num_visualizations = 100
        eval_config.num_examples = 100
        eval_config.visualization_export_dir = os.path.join(
            FLAGS.eval_dir, 'images')
        os.makedirs(eval_config.visualization_export_dir, exist_ok=True)
    metrics = evaluator.evaluate(create_input_dict_fn, model_fn, eval_config,
                                 categories, FLAGS.checkpoint_dir,
                                 FLAGS.eval_dir)
    process_metrics(metrics)
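
`process_metrics` is a project-specific helper that is not shown; a minimal hypothetical stand-in would just log the dict returned by evaluator.evaluate:

def process_metrics(metrics):
    # Hypothetical placeholder: print each returned metric name and value.
    for name, value in sorted(metrics.items()):
        print('%s: %s' % (name, value))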
Example #15
def main(unused_argv):
    assert FLAGS.checkpoint_dir, '`checkpoint_dir` is missing.'
    assert FLAGS.eval_dir, '`eval_dir` is missing.'
    tf.gfile.MakeDirs(FLAGS.eval_dir)
    if FLAGS.pipeline_config_path:
        configs = config_util.get_configs_from_pipeline_file(FLAGS.pipeline_config_path)
        tf.gfile.Copy(
            FLAGS.pipeline_config_path,
            os.path.join(FLAGS.eval_dir, 'pipeline.config'),
            overwrite=True
        )
    else:
        configs = config_util.get_configs_from_multiple_files(
            model_config_path=FLAGS.model_config_path,
            eval_config_path=FLAGS.eval_config_path,
            eval_input_config_path=FLAGS.input_config_path
        )
        for name, config in [('model.config', FLAGS.model_config_path),
                             ('eval.config', FLAGS.eval_config_path),
                             ('input.config', FLAGS.input_config_path)]:
            tf.gfile.Copy(config, os.path.join(FLAGS.eval_dir, name), overwrite=True)

    model_config = configs['model']
    eval_config = configs['eval_config']
    input_config = configs['eval_input_config']
    if FLAGS.eval_training_data:
        input_config = configs['train_input_config']

    model_fn = functools.partial(
        model_builder.build,
        model_config=model_config,
        is_training=False)

    def get_next(config):
        return dataset_util.make_initializable_iterator(dataset_builder.build(config)).get_next()

    create_input_dict_fn = functools.partial(get_next, input_config)

    label_map = label_map_util.load_labelmap(input_config.label_map_path)
    max_num_classes = max([item.id for item in label_map.item])
    categories = label_map_util.convert_label_map_to_categories(label_map, max_num_classes)

    if FLAGS.run_once:
        eval_config.max_evals = 1

    metrics = evaluator.evaluate(
        create_input_dict_fn,
        model_fn,
        eval_config,
        categories,
        FLAGS.checkpoint_dir,
        FLAGS.eval_dir
    )
    print(json.dumps({
        'PascalBoxes_Precision/mAP@0.5IOU':
            str(metrics['PascalBoxes_Precision/mAP@0.5IOU'])
    }))
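
That final print emits a one-line JSON record; with a hypothetical score it looks like:

{"PascalBoxes_Precision/mAP@0.5IOU": "0.7312"}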
Example #16
def evaluate_step():
    print("Evaluating")
    assert FLAGS.eval_dir, '`eval_dir` is missing.'
    model_config, eval_config, input_config = get_eval_configs_from_pipeline_file()

    model_fn = functools.partial(model_builder.build,
                                 model_config=model_config,
                                 is_training=False)

    create_input_dict_fn = functools.partial(input_reader_builder.build,
                                             input_config)

    label_map = label_map_util.load_labelmap(input_config.label_map_path)
    max_num_classes = max([item.id for item in label_map.item])
    categories = label_map_util.convert_label_map_to_categories(
        label_map, max_num_classes)

    evaluator.evaluate(create_input_dict_fn, model_fn, eval_config, categories,
                       FLAGS.train_dir, FLAGS.eval_dir)
Example #17
def main(unused_argv):
  assert FLAGS.checkpoint_dir, '`checkpoint_dir` is missing.'
  assert FLAGS.eval_dir, '`eval_dir` is missing.'
  model_config, train_config, input_config, eval_config = get_configs_from_pipeline_file()

  model_fn = functools.partial(
      build_man_model,
      model_config=model_config,
      is_training=False)

  create_input_dict_fn = functools.partial(
      input_reader_builder.build,
      input_config)

  label_map = label_map_util.load_labelmap(input_config.label_map_path)
  max_num_classes = max([item.id for item in label_map.item])
  categories = label_map_util.convert_label_map_to_categories(
      label_map, max_num_classes)

  evaluator.evaluate(create_input_dict_fn, model_fn, eval_config, categories,
                     FLAGS.checkpoint_dir, FLAGS.eval_dir)
Example #18
def main(unused_argv):
  assert FLAGS.checkpoint_dir, '`checkpoint_dir` is missing.'
  assert FLAGS.eval_dir, '`eval_dir` is missing.'
  if FLAGS.pipeline_config_path:
    model_config, eval_config, input_config = get_configs_from_pipeline_file()
  else:
    model_config, eval_config, input_config = get_configs_from_multiple_files()

  model_fn = functools.partial(
      model_builder.build,
      model_config=model_config,
      is_training=False)

  create_input_dict_fn = functools.partial(
      input_reader_builder.build,
      input_config)

  label_map = label_map_util.load_labelmap(input_config.label_map_path)
  max_num_classes = max([item.id for item in label_map.item])
  categories = label_map_util.convert_label_map_to_categories(
      label_map, max_num_classes)

  evaluator.evaluate(create_input_dict_fn, model_fn, eval_config, categories,
                     FLAGS.checkpoint_dir, FLAGS.eval_dir)
Example #19
def evaluate():
    return evaluator.evaluate(eval_input_dict_fn, eval_model_fn, eval_config,
                              categories, FLAGS.train_dir, FLAGS.eval_dir,
                              graph_hook_fn=eval_graph_rewriter_fn)
Example #20
def main(pipeline_config_path, checkpoint_dir, eval_dir, eval_training_data=False,
         eval_config_path="", input_config_path="",
         model_config_path="", run_once=False):
  """
  DEFINE_boolean('eval_training_data', False,
                       'If training data should be evaluated for this job.')
  DEFINE_string('checkpoint_dir', '',
                      'Directory containing checkpoints to evaluate, typically '
                      'set to `train_dir` used in the training job.')
  DEFINE_string('eval_dir', '',
                      'Directory to write eval summaries to.')
  DEFINE_string('pipeline_config_path', '',
                      'Path to a pipeline_pb2.TrainEvalPipelineConfig config '
                      'file. If provided, other configs are ignored')
  DEFINE_string('eval_config_path', '',
                      'Path to an eval_pb2.EvalConfig config file.')
  DEFINE_string('input_config_path', '',
                      'Path to an input_reader_pb2.InputReader config file.')
  DEFINE_string('model_config_path', '',
                      'Path to a model_pb2.DetectionModel config file.')
  DEFINE_boolean('run_once', False, 'Option to only run a single pass of '
                       'evaluation. Overrides the `max_evals` parameter in the '
                       'provided config.')
  """
  tf.logging.set_verbosity(tf.logging.INFO)
  tf.gfile.MakeDirs(eval_dir)
  if pipeline_config_path:
    configs = config_util.get_configs_from_pipeline_file(
        pipeline_config_path)
    tf.gfile.Copy(pipeline_config_path,
                  os.path.join(eval_dir, 'pipeline.config'),
                  overwrite=True)
  else:
    configs = config_util.get_configs_from_multiple_files(
        model_config_path=model_config_path,
        eval_config_path=eval_config_path,
        eval_input_config_path=input_config_path)
    for name, config in [('model.config', model_config_path),
                         ('eval.config', eval_config_path),
                         ('input.config', input_config_path)]:
      tf.gfile.Copy(config,
                    os.path.join(eval_dir, name),
                    overwrite=True)

  model_config = configs['model']
  eval_config = configs['eval_config']
  input_config = configs['eval_input_config']
  if eval_training_data:
    input_config = configs['train_input_config']

  model_fn = functools.partial(
      model_builder.build,
      model_config=model_config,
      is_training=False)

  def get_next(config):
    return dataset_builder.make_initializable_iterator(
        dataset_builder.build(config)).get_next()

  create_input_dict_fn = functools.partial(get_next, input_config)

  label_map = label_map_util.load_labelmap(input_config.label_map_path)
  max_num_classes = max([item.id for item in label_map.item])
  categories = label_map_util.convert_label_map_to_categories(
      label_map, max_num_classes)

  if run_once:
    eval_config.max_evals = 1

  graph_rewriter_fn = None
  if 'graph_rewriter_config' in configs:
    graph_rewriter_fn = graph_rewriter_builder.build(
        configs['graph_rewriter_config'], is_training=False)

  evaluator.evaluate(
      create_input_dict_fn,
      model_fn,
      eval_config,
      categories,
      checkpoint_dir,
      eval_dir,
      graph_hook_fn=graph_rewriter_fn)
Example #21
File: eval.py Project: hitlk/models
def main(unused_argv):
    assert FLAGS.checkpoint_dir, '`checkpoint_dir` is missing.'
    assert FLAGS.eval_dir, '`eval_dir` is missing.'
    tf.gfile.MakeDirs(FLAGS.eval_dir)

    wait_time = 300
    while wait_time > 0:
        latest_checkpoint = tf.train.latest_checkpoint(FLAGS.checkpoint_dir)
        if latest_checkpoint:
            num_steps = latest_checkpoint.split('-')[-1]
            if int(num_steps) > 0:
                wait_time = 0
        if wait_time > 0:
            tf.logging.info("waiting for checkpoint...")
            time.sleep(wait_time)

    if FLAGS.pipeline_config_path:
        configs = config_util.get_configs_from_pipeline_file(
            FLAGS.pipeline_config_path)
        tf.gfile.Copy(FLAGS.pipeline_config_path,
                      os.path.join(FLAGS.eval_dir, 'pipeline.config'),
                      overwrite=True)
    else:
        configs = config_util.get_configs_from_multiple_files(
            model_config_path=FLAGS.model_config_path,
            eval_config_path=FLAGS.eval_config_path,
            eval_input_config_path=FLAGS.input_config_path)
        for name, config in [('model.config', FLAGS.model_config_path),
                             ('eval.config', FLAGS.eval_config_path),
                             ('input.config', FLAGS.input_config_path)]:
            tf.gfile.Copy(config,
                          os.path.join(FLAGS.eval_dir, name),
                          overwrite=True)

    model_config = configs['model']
    eval_config = configs['eval_config']
    input_config = configs['eval_input_config']
    if FLAGS.eval_training_data:
        input_config = configs['train_input_config']

    do_augmentation = False

    if input_config.WhichOneof('input_reader') == 'tf_record_input_reader':
        input_reader_config = input_config.tf_record_input_reader
        input_path = input_reader_config.input_path
        if not input_path or not input_path[0]:
            do_augmentation = True
            train_input_config = configs['train_input_config']
            train_input_reader_config = train_input_config.tf_record_input_reader
            input_reader_config.input_path[:] = train_input_reader_config.input_path[:]

    model_fn = functools.partial(model_builder.build,
                                 model_config=model_config,
                                 is_training=False)

    def get_next(config):
        return dataset_util.make_initializable_iterator(
            dataset_builder.build(config)).get_next()

    create_input_dict_fn = functools.partial(get_next, input_config)

    label_map = label_map_util.load_labelmap(input_config.label_map_path)
    max_num_classes = max([item.id for item in label_map.item])
    categories = label_map_util.convert_label_map_to_categories(
        label_map, max_num_classes)

    if FLAGS.run_once:
        eval_config.max_evals = 1

    evaluator.evaluate(create_input_dict_fn,
                       model_fn,
                       eval_config,
                       categories,
                       FLAGS.checkpoint_dir,
                       FLAGS.eval_dir,
                       do_augmentation=do_augmentation)
Example #22
def main(unused_argv):
    assert FLAGS.checkpoint_dir, '`checkpoint_dir` is missing.'
    assert FLAGS.eval_dir, '`eval_dir` is missing.'
    tf.gfile.MakeDirs(FLAGS.eval_dir)
    if FLAGS.pipeline_config_path:
        configs = config_util.get_configs_from_pipeline_file(
            FLAGS.pipeline_config_path)
        tf.gfile.Copy(FLAGS.pipeline_config_path,
                      os.path.join(FLAGS.eval_dir, 'pipeline.config'),
                      overwrite=True)
    else:
        configs = config_util.get_configs_from_multiple_files(
            model_config_path=FLAGS.model_config_path,
            eval_config_path=FLAGS.eval_config_path,
            eval_input_config_path=FLAGS.input_config_path)
        for name, config in [('model.config', FLAGS.model_config_path),
                             ('eval.config', FLAGS.eval_config_path),
                             ('input.config', FLAGS.input_config_path)]:
            tf.gfile.Copy(config,
                          os.path.join(FLAGS.eval_dir, name),
                          overwrite=True)

    model_config = configs['model']
    eval_config = configs['eval_config']
    input_config = configs['eval_input_config']
    if FLAGS.eval_training_data:
        input_config = configs['train_input_config']

    model_fn = functools.partial(model_builder.build,
                                 model_config=model_config,
                                 is_training=False)

    def get_next(config):
        return dataset_builder.make_initializable_iterator(
            dataset_builder.build(config)).get_next()

    create_input_dict_fn = functools.partial(get_next, input_config)

    label_map = label_map_util.load_labelmap(input_config.label_map_path)
    max_num_classes = max([item.id for item in label_map.item])
    categories = label_map_util.convert_label_map_to_categories(
        label_map, max_num_classes)

    if FLAGS.run_once:
        eval_config.max_evals = 1

    graph_rewriter_fn = None
    if 'graph_rewriter_config' in configs:
        graph_rewriter_fn = graph_rewriter_builder.build(
            configs['graph_rewriter_config'], is_training=False)

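    # This file path follows the Kubeflow Pipelines convention for surfacing
    # a TensorBoard artifact in the pipeline UI.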
    metadata = {
        'outputs': [{
            'type': 'tensorboard',
            'source': FLAGS.eval_dir,
        }]
    }
    with open('/mlpipeline-ui-metadata.json', 'w') as f:
        json.dump(metadata, f)

    evaluator.evaluate(create_input_dict_fn,
                       model_fn,
                       eval_config,
                       categories,
                       FLAGS.checkpoint_dir,
                       FLAGS.eval_dir,
                       graph_hook_fn=graph_rewriter_fn)
Example #23
def main(unused_argv):
    checkpoint_dir = './train'
    eval_dir = './eval'
    pipeline_config_path = './pipeline_config.config'

    model_config_path = './train'
    eval_config_path = './train'
    input_config_path = './train'

    run_once = False
    eval_training_data = False

    assert checkpoint_dir, '`checkpoint_dir` is missing.'
    assert eval_dir, '`eval_dir` is missing.'
    tf.gfile.MakeDirs(eval_dir)
    if pipeline_config_path:
        configs = config_util.get_configs_from_pipeline_file(
            pipeline_config_path)
        tf.gfile.Copy(pipeline_config_path,
                      os.path.join(eval_dir, 'pipeline.config'),
                      overwrite=True)
    else:
        configs = config_util.get_configs_from_multiple_files(
            model_config_path=model_config_path,
            eval_config_path=eval_config_path,
            eval_input_config_path=input_config_path)
        for name, config in [('model.config', model_config_path),
                             ('eval.config', eval_config_path),
                             ('input.config', input_config_path)]:
            tf.gfile.Copy(config, os.path.join(eval_dir, name), overwrite=True)

    model_config = configs['model']
    eval_config = configs['eval_config']
    input_config = configs['eval_input_config']
    if eval_training_data:
        input_config = configs['train_input_config']

    model_fn = functools.partial(model_builder.build,
                                 model_config=model_config,
                                 is_training=False)

    def get_next(config):
        return dataset_util.make_initializable_iterator(
            dataset_builder.build(config)).get_next()

    create_input_dict_fn = functools.partial(get_next, input_config)

    label_map = label_map_util.load_labelmap(input_config.label_map_path)
    max_num_classes = max([item.id for item in label_map.item])
    categories = label_map_util.convert_label_map_to_categories(
        label_map, max_num_classes)

    if run_once:
        eval_config.max_evals = 1

    graph_rewriter_fn = None
    if 'graph_rewriter_config' in configs:
        graph_rewriter_fn = graph_rewriter_builder.build(
            configs['graph_rewriter_config'], is_training=False)

    evaluator.evaluate(create_input_dict_fn,
                       model_fn,
                       eval_config,
                       categories,
                       checkpoint_dir,
                       eval_dir,
                       graph_hook_fn=graph_rewriter_fn)
Example #24
def main(unused_argv):
    # Uncomment the next lines on Linux to run the evaluation on the CPU
    # config = tf.ConfigProto(
    #         device_count={'GPU': 0}
    # )
    # sess = tf.Session(config=config)

    # Uncomment the next line on Windows to run the evaluation on the CPU
    # os.environ['CUDA_VISIBLE_DEVICES'] = '-1'

    # Use the following lines to restrict this process to only 30% of the GPU VRAM
    gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.3)
    sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options))

    assert FLAGS.checkpoint_dir, '`checkpoint_dir` is missing.'
    assert FLAGS.eval_dir, '`eval_dir` is missing.'
    tf.gfile.MakeDirs(FLAGS.eval_dir)
    if FLAGS.pipeline_config_path:
        configs = config_util.get_configs_from_pipeline_file(
            FLAGS.pipeline_config_path)
        tf.gfile.Copy(FLAGS.pipeline_config_path,
                      os.path.join(FLAGS.eval_dir, 'pipeline.config'),
                      overwrite=True)
    else:
        configs = config_util.get_configs_from_multiple_files(
            model_config_path=FLAGS.model_config_path,
            eval_config_path=FLAGS.eval_config_path,
            eval_input_config_path=FLAGS.input_config_path)
        for name, config in [('model.config', FLAGS.model_config_path),
                             ('eval.config', FLAGS.eval_config_path),
                             ('input.config', FLAGS.input_config_path)]:
            tf.gfile.Copy(config,
                          os.path.join(FLAGS.eval_dir, name),
                          overwrite=True)

    model_config = configs['model']
    eval_config = configs['eval_config']
    input_config = configs['eval_input_config']
    if FLAGS.eval_training_data:
        input_config = configs['train_input_config']

    model_fn = functools.partial(model_builder.build,
                                 model_config=model_config,
                                 is_training=False)

    def get_next(config):
        return dataset_util.make_initializable_iterator(
            dataset_builder.build(config)).get_next()

    create_input_dict_fn = functools.partial(get_next, input_config)

    label_map = label_map_util.load_labelmap(input_config.label_map_path)
    max_num_classes = max([item.id for item in label_map.item])
    categories = label_map_util.convert_label_map_to_categories(
        label_map, max_num_classes)

    if FLAGS.run_once:
        eval_config.max_evals = 1

    evaluator.evaluate(create_input_dict_fn, model_fn, eval_config, categories,
                       FLAGS.checkpoint_dir, FLAGS.eval_dir)
def _run_checkpoint_once(tensor_dict,
                         evaluators=None,
                         batch_processor=None,
                         checkpoint_dirs=None,
                         variables_to_restore=None,
                         restore_fn=None,
                         num_batches=1,
                         master='',
                         save_graph=False,
                         save_graph_dir='',
                         losses_dict=None):
    """Evaluates metrics defined in evaluators and returns summaries.

  This function loads the latest checkpoint in checkpoint_dirs and evaluates
  all metrics defined in evaluators. The metrics are processed in batch by the
  batch_processor.

  Args:
    tensor_dict: a dictionary holding tensors representing a batch of detections
      and corresponding groundtruth annotations.
    evaluators: a list of object of type DetectionEvaluator to be used for
      evaluation. Note that the metric names produced by different evaluators
      must be unique.
    batch_processor: a function taking four arguments:
      1. tensor_dict: the same tensor_dict that is passed in as the first
        argument to this function.
      2. sess: a tensorflow session
      3. batch_index: an integer representing the index of the batch amongst
        all batches
      By default, batch_processor is None, which defaults to running:
        return sess.run(tensor_dict)
      To skip an image, it suffices to return an empty dictionary in place of
      result_dict.
    checkpoint_dirs: list of directories to load into an EnsembleModel. If it
      has only one directory, EnsembleModel will not be used --
        a DetectionModel
      will be instantiated directly. Not used if restore_fn is set.
    variables_to_restore: None, or a dictionary mapping variable names found in
      a checkpoint to model variables. The dictionary would normally be
      generated by creating a tf.train.ExponentialMovingAverage object and
      calling its variables_to_restore() method. Not used if restore_fn is set.
    restore_fn: None, or a function that takes a tf.Session object and correctly
      restores all necessary variables from the correct checkpoint file. If
      None, attempts to restore from the first directory in checkpoint_dirs.
    num_batches: the number of batches to use for evaluation.
    master: the location of the Tensorflow session.
    save_graph: whether or not the Tensorflow graph is stored as a pbtxt file.
    save_graph_dir: where to store the Tensorflow graph on disk. If save_graph
      is True this must be non-empty.
    losses_dict: optional dictionary of scalar detection losses.

  Returns:
    global_step: the count of global steps.
    all_evaluator_metrics: A dictionary containing metric names and values.

  Raises:
    ValueError: if restore_fn is None and checkpoint_dirs doesn't have at least
      one element.
    ValueError: if save_graph is True and save_graph_dir is not defined.
  """

    global result_losses_dict
    if save_graph and not save_graph_dir:
        raise ValueError('`save_graph_dir` must be defined.')
    sess = tf.Session(master, graph=tf.get_default_graph())
    sess.run(tf.global_variables_initializer())
    sess.run(tf.local_variables_initializer())
    sess.run(tf.tables_initializer())
    if restore_fn:
        restore_fn(sess)
    else:
        if not checkpoint_dirs:
            raise ValueError('`checkpoint_dirs` must have at least one entry.')
        checkpoint_file = tf.train.latest_checkpoint(checkpoint_dirs[0])
        saver = tf.train.Saver(variables_to_restore)
        saver.restore(sess, checkpoint_file)

    if save_graph:
        tf.train.write_graph(sess.graph_def, save_graph_dir, 'eval.pbtxt')

    counters = {'skipped': 0, 'success': 0}
    aggregate_result_losses_dict = collections.defaultdict(list)
    with tf.contrib.slim.queues.QueueRunners(sess):
        try:
            for batch in range(int(num_batches)):
                if (batch + 1) % 100 == 0:
                    logging.info('Running eval ops batch %d/%d',
                                 batch + 1, num_batches)
                if not batch_processor:
                    try:
                        if not losses_dict:
                            losses_dict = {}
                        result_dict, result_losses_dict = sess.run(
                            [tensor_dict, losses_dict])
                        counters['success'] += 1
                    except tf.errors.InvalidArgumentError:
                        logging.info('Skipping image')
                        counters['skipped'] += 1
                        result_dict = {}
                else:
                    result_dict, result_losses_dict = batch_processor(
                        tensor_dict, sess, batch, counters,
                        losses_dict=losses_dict)
                if not result_dict:
                    continue
                for key, value in iter(result_losses_dict.items()):
                    aggregate_result_losses_dict[key].append(value)
                for evaluator in evaluators:
                    # TODO(b/65130867): Use image_id tensor once we fix the input data
                    # decoders to return correct image_id.
                    # TODO(akuznetsa): result_dict contains batches of images, while
                    # add_single_ground_truth_image_info expects a single image. Fix
                    evaluator.add_single_ground_truth_image_info(
                        image_id=batch, groundtruth_dict=result_dict)
                    evaluator.add_single_detected_image_info(
                        image_id=batch, detections_dict=result_dict)
            logging.info('Running eval batches done.')
        except tf.errors.OutOfRangeError:
            logging.info('Done evaluating -- epoch limit reached')
        finally:
            # When done, ask the threads to stop.
            logging.info('# success: %d', counters['success'])
            logging.info('# skipped: %d', counters['skipped'])
            all_evaluator_metrics = {}
            for evaluator in evaluators:
                metrics = evaluator.evaluate()
                evaluator.clear()
                if any(key in all_evaluator_metrics for key in metrics):
                    raise ValueError('Metric names between evaluators must not collide.')
                all_evaluator_metrics.update(metrics)

            with sess.graph.as_default():
                # The global step is hard-coded here; the original lookup is kept
                # commented out for reference.
                # global_step = tf.train.global_step(sess, tf.train.get_global_step())
                global_step = 200000
            for key, value in iter(aggregate_result_losses_dict.items()):
                all_evaluator_metrics['Losses/' + key] = np.mean(value)
    sess.close()
    return all_evaluator_metrics
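
For reference, a minimal sketch of a `restore_fn` as consumed above, mirroring the fallback branch inside the function itself (the helper name is hypothetical):

def make_restore_fn(checkpoint_dir, variables_to_restore=None):
    # Build a saver once, then restore the latest checkpoint when called.
    saver = tf.train.Saver(variables_to_restore)
    def restore_fn(sess):
        saver.restore(sess, tf.train.latest_checkpoint(checkpoint_dir))
    return restore_fn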