Example #1
def main(unused_argv):
    assert FLAGS.checkpoint_dir, '`checkpoint_dir` is missing.'
    assert FLAGS.eval_dir, '`eval_dir` is missing.'
    if FLAGS.gpu:
        os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
        os.environ["CUDA_VISIBLE_DEVICES"] = FLAGS.gpu

    tf.gfile.MakeDirs(FLAGS.eval_dir)
    if FLAGS.pipeline_config_path:
        configs = config_util.get_configs_from_pipeline_file(
            FLAGS.pipeline_config_path)
        tf.gfile.Copy(FLAGS.pipeline_config_path,
                      os.path.join(FLAGS.eval_dir, 'pipeline.config'),
                      overwrite=True)
    else:
        configs = config_util.get_configs_from_multiple_files(
            model_config_path=FLAGS.model_config_path,
            eval_config_path=FLAGS.eval_config_path,
            eval_input_config_path=FLAGS.input_config_path)
        for name, config in [('model.config', FLAGS.model_config_path),
                             ('eval.config', FLAGS.eval_config_path),
                             ('input.config', FLAGS.input_config_path)]:
            tf.gfile.Copy(config,
                          os.path.join(FLAGS.eval_dir, name),
                          overwrite=True)

    model_config = configs['model']
    eval_config = configs['eval_config']
    input_config = configs['eval_input_config']
    if FLAGS.eval_training_data:
        input_config = configs['train_input_config']

    model_fn = functools.partial(model_builder.build,
                                 model_config=model_config,
                                 is_training=False)

    def get_next(config):
        return dataset_builder.make_initializable_iterator(
            dataset_builder.build(config)).get_next()

    create_input_dict_fn = functools.partial(get_next, input_config)

    categories = label_map_util.create_categories_from_labelmap(
        input_config.label_map_path)

    if FLAGS.run_once:
        eval_config.max_evals = 1

    graph_rewriter_fn = None
    if 'graph_rewriter_config' in configs:
        graph_rewriter_fn = graph_rewriter_builder.build(
            configs['graph_rewriter_config'], is_training=False)

    evaluator.evaluate(create_input_dict_fn,
                       model_fn,
                       eval_config,
                       categories,
                       FLAGS.checkpoint_dir,
                       FLAGS.eval_dir,
                       graph_hook_fn=graph_rewriter_fn)
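# A hedged usage sketch: assuming this main() lives in the legacy eval.py entry point,
# it might be invoked as follows (flag names taken from the code above, paths hypothetical):
#
#   python eval.py \
#       --checkpoint_dir=training/ \
#       --eval_dir=training/eval/ \
#       --pipeline_config_path=training/pipeline.config \
#       --gpu=0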
Example #2
def read_data_and_evaluate(input_config, eval_config):
    """Reads pre-computed object detections and groundtruth from tf_record.

    Args:
      input_config: input config proto of type
        object_detection.protos.InputReader.
      eval_config: evaluation config proto of type
        object_detection.protos.EvalConfig.

    Returns:
      Evaluated detections metrics.

    Raises:
      ValueError: if input_reader type is not supported or metric type is unknown.
    """
    if input_config.WhichOneof('input_reader') == 'tf_record_input_reader':
        input_paths = input_config.tf_record_input_reader.input_path

        categories = label_map_util.create_categories_from_labelmap(
            input_config.label_map_path)

        object_detection_evaluators = evaluator.get_evaluators(
            eval_config, categories)
        # Support a single evaluator
        object_detection_evaluator = object_detection_evaluators[0]

        skipped_images = 0
        processed_images = 0
        for input_path in _generate_filenames(input_paths):
            tf.logging.info('Processing file: {0}'.format(input_path))

            record_iterator = tf.python_io.tf_record_iterator(path=input_path)
            data_parser = tf_example_parser.TfExampleDetectionAndGTParser()

            for string_record in record_iterator:
                tf.logging.log_every_n(tf.logging.INFO,
                                       'Processed %d images...', 1000,
                                       processed_images)
                processed_images += 1

                example = tf.train.Example()
                example.ParseFromString(string_record)
                decoded_dict = data_parser.parse(example)
                if decoded_dict:
                    object_detection_evaluator.add_single_ground_truth_image_info(
                        decoded_dict[
                            standard_fields.DetectionResultFields.key],
                        decoded_dict)
                    object_detection_evaluator.add_single_detected_image_info(
                        decoded_dict[
                            standard_fields.DetectionResultFields.key],
                        decoded_dict)
                else:
                    skipped_images += 1
                    tf.logging.info(
                        'Skipped images: {0}'.format(skipped_images))

        return object_detection_evaluator.evaluate()

    raise ValueError('Unsupported input_reader_config.')
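# _generate_filenames is a helper that is not shown above. A minimal sketch, assuming each
# entry in input_paths may be a literal path or a glob pattern (the real helper may also
# expand sharded "@N" filenames):
import tensorflow as tf

def _generate_filenames(input_paths):
    for input_path in input_paths:
        # tf.gfile.Glob also returns a single-element list for a plain, existing filename.
        for filename in tf.gfile.Glob(input_path):
            yield filename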
Example #3
def evaluate_single_image(image_path, annotation_path):
    """ Evaluate mAP on image
	args:
		image_path: path to image
		annotation_path: path to groundtruth in Pascal VOC format .xml
	"""
    categories = create_categories_from_labelmap(args.label_file)
    label_map_dict = get_label_map_dict(args.label_file)
    coco_evaluator = coco_evaluation.CocoDetectionEvaluator(categories)
    image_name = os.path.basename(image_path).split('.')[0]

    # Read groundtruth from XML file in Pascal VOC format
    gt_boxes, gt_classes = voc_parser(annotation_path, label_map_dict)
    dt_boxes, dt_classes, dt_scores, num_det = postprocess_output(image_path)

    coco_evaluator.add_single_ground_truth_image_info(
        image_id=image_name,
        groundtruth_dict={
            standard_fields.InputDataFields.groundtruth_boxes:
            np.array(gt_boxes),
            standard_fields.InputDataFields.groundtruth_classes:
            np.array(gt_classes)
        })
    coco_evaluator.add_single_detected_image_info(
        image_id=image_name,
        detections_dict={
            standard_fields.DetectionResultFields.detection_boxes: dt_boxes,
            standard_fields.DetectionResultFields.detection_scores: dt_scores,
            standard_fields.DetectionResultFields.detection_classes: dt_classes
        })

    coco_evaluator.evaluate()
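# voc_parser and postprocess_output are project-specific helpers that are not shown above
# (and `args` is a module-level argparse namespace that is not shown either). A minimal
# sketch of a VOC parser, assuming the evaluator expects boxes as [ymin, xmin, ymax, xmax]
# in absolute pixel coordinates and classes as label-map ids:
import xml.etree.ElementTree as ET

def voc_parser(annotation_path, label_map_dict):
    boxes, classes = [], []
    root = ET.parse(annotation_path).getroot()
    for obj in root.findall('object'):
        bndbox = obj.find('bndbox')
        boxes.append([float(bndbox.find('ymin').text),
                      float(bndbox.find('xmin').text),
                      float(bndbox.find('ymax').text),
                      float(bndbox.find('xmax').text)])
        classes.append(label_map_dict[obj.find('name').text])
    return boxes, classes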
Example #4
def main(unused_argv):
  # Uncomment the following lines to restrict this process to only 30% of the GPU VRAM
  #gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.3)
  #sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options))

  assert FLAGS.checkpoint_dir, '`checkpoint_dir` is missing.'
  assert FLAGS.eval_dir, '`eval_dir` is missing.'
  tf.gfile.MakeDirs(FLAGS.eval_dir)
  if FLAGS.pipeline_config_path:
    configs = config_util.get_configs_from_pipeline_file(
        FLAGS.pipeline_config_path)
    tf.gfile.Copy(
        FLAGS.pipeline_config_path,
        os.path.join(FLAGS.eval_dir, 'pipeline.config'),
        overwrite=True)
  else:
    configs = config_util.get_configs_from_multiple_files(
        model_config_path=FLAGS.model_config_path,
        eval_config_path=FLAGS.eval_config_path,
        eval_input_config_path=FLAGS.input_config_path)
    for name, config in [('model.config', FLAGS.model_config_path),
                         ('eval.config', FLAGS.eval_config_path),
                         ('input.config', FLAGS.input_config_path)]:
      tf.gfile.Copy(config, os.path.join(FLAGS.eval_dir, name), overwrite=True)

  model_config = configs['model']
  eval_config = configs['eval_config']
  input_config = configs['eval_input_config']
  if FLAGS.eval_training_data:
    input_config = configs['train_input_config']

  model_fn = functools.partial(
      model_builder.build, model_config=model_config, is_training=False)

  def get_next(config):
    return dataset_builder.make_initializable_iterator(
        dataset_builder.build(config)).get_next()

  create_input_dict_fn = functools.partial(get_next, input_config)

  categories = label_map_util.create_categories_from_labelmap(
      input_config.label_map_path)

  if FLAGS.run_once:
    eval_config.max_evals = 1

  graph_rewriter_fn = None
  if 'graph_rewriter_config' in configs:
    graph_rewriter_fn = graph_rewriter_builder.build(
        configs['graph_rewriter_config'], is_training=False)

  evaluator.evaluate(
      create_input_dict_fn,
      model_fn,
      eval_config,
      categories,
      FLAGS.checkpoint_dir,
      FLAGS.eval_dir,
      graph_hook_fn=graph_rewriter_fn)
Example #5
def read_data_and_evaluate(input_config, eval_config):
  """Reads pre-computed object detections and groundtruth from tf_record.

  Args:
    input_config: input config proto of type
      object_detection.protos.InputReader.
    eval_config: evaluation config proto of type
      object_detection.protos.EvalConfig.

  Returns:
    Evaluated detections metrics.

  Raises:
    ValueError: if input_reader type is not supported or metric type is unknown.
  """
  if input_config.WhichOneof('input_reader') == 'tf_record_input_reader':
    input_paths = input_config.tf_record_input_reader.input_path

    categories = label_map_util.create_categories_from_labelmap(
        input_config.label_map_path)

    object_detection_evaluators = evaluator.get_evaluators(
        eval_config, categories)
    # Support a single evaluator
    object_detection_evaluator = object_detection_evaluators[0]

    skipped_images = 0
    processed_images = 0
    for input_path in _generate_filenames(input_paths):
      tf.logging.info('Processing file: {0}'.format(input_path))

      record_iterator = tf.python_io.tf_record_iterator(path=input_path)
      data_parser = tf_example_parser.TfExampleDetectionAndGTParser()

      for string_record in record_iterator:
        tf.logging.log_every_n(tf.logging.INFO, 'Processed %d images...', 1000,
                               processed_images)
        processed_images += 1

        example = tf.train.Example()
        example.ParseFromString(string_record)
        decoded_dict = data_parser.parse(example)

        if decoded_dict:
          object_detection_evaluator.add_single_ground_truth_image_info(
              decoded_dict[standard_fields.DetectionResultFields.key],
              decoded_dict)
          object_detection_evaluator.add_single_detected_image_info(
              decoded_dict[standard_fields.DetectionResultFields.key],
              decoded_dict)
        else:
          skipped_images += 1
          tf.logging.info('Skipped images: {0}'.format(skipped_images))

    return object_detection_evaluator.evaluate()

  raise ValueError('Unsupported input_reader_config.')
Example #6
def evaluate(eval_dir, config_dir, checkpoint_dir, eval_training_data=False):
    '''
        Function used to evaluate your trained model. 

        Args: 
            Required:               
                eval_dir: The directory where the tfevent file will be saved.
                config_dir: The protobuf configuration directory.
                checkpoint_dir: The directory where the checkpoint you want to evaluate is.
            
            Optional:
                eval_training_data: If set to True, the evaluation will be run on the training dataset.

        Returns:
            A dictionary of metrics ready to be sent to the picsell.ia platform.
    '''

    tf.reset_default_graph()
    tf.gfile.MakeDirs(eval_dir)
    configs = config_util.get_configs_from_pipeline_file(
        os.path.join(config_dir, "pipeline.config"))
    model_config = configs['model']
    eval_config = configs['eval_config']
    input_config = configs['eval_input_config']
    if eval_training_data:
        input_config = configs['train_input_config']

    model_fn = functools.partial(model_builder.build,
                                 model_config=model_config,
                                 is_training=False)

    def get_next(config):
        return dataset_builder.make_initializable_iterator(
            dataset_builder.build(config)).get_next()

    create_input_dict_fn = functools.partial(get_next, input_config)

    categories = label_map_util.create_categories_from_labelmap(
        input_config.label_map_path)

    eval_config.max_evals = 1

    graph_rewriter_fn = None
    if 'graph_rewriter_config' in configs:
        graph_rewriter_fn = graph_rewriter_builder.build(
            configs['graph_rewriter_config'], is_training=False)

    metrics = evaluator.evaluate(create_input_dict_fn,
                                 model_fn,
                                 eval_config,
                                 categories,
                                 checkpoint_dir,
                                 eval_dir,
                                 graph_hook_fn=graph_rewriter_fn)
    return {k: str(round(v, 3)) for k, v in metrics.items()}
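# A hedged usage sketch (directory names are hypothetical; config_dir must contain the
# pipeline.config read above):
#
#   metrics = evaluate(eval_dir='training/eval',
#                      config_dir='training',
#                      checkpoint_dir='training')
#   print(metrics)  # dict mapping metric names to stringified, rounded values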
Example #7
def evaluate(res_dir, annotations, label_map_path, full_report):
    '''
    Calculate OID metrics via the evaluator class included in the TF models repository
    https://github.com/tensorflow/models/tree/master/research/object_detection/metrics

    Reads pre-computed object detections and groundtruth.

    Args:
      res_dir: directory with pre-computed object detections
      annotations: groundtruth (file with annotations)
      label_map_path: labelmap file
      full_report: if True, log progress for every processed file

    Returns:
      Evaluated detections metrics.
    '''
    class EvaluatorConfig:
        metrics_set = ['open_images_V2_detection_metrics']

    eval_config = EvaluatorConfig()

    categories = label_map_util.create_categories_from_labelmap(label_map_path)
    class_map = label_map_util.get_label_map_dict(label_map_path, False, False)

    object_detection_evaluators = evaluator.get_evaluators(
        eval_config, categories)
    # Support a single evaluator
    object_detection_evaluator = object_detection_evaluators[0]

    print('Loading annotations...')
    ann = get_annotations(annotations, class_map)

    files = ck_utils.get_files(res_dir)
    for file_index, file_name in enumerate(files):
        if full_report:
            print('Loading detections and annotations for {} ({} of {}) ...'.
                  format(file_name, file_index + 1, len(files)))
        elif (file_index + 1) % 100 == 0:
            print('Loading detections and annotations: {} of {} ...'.format(
                file_index + 1, len(files)))
        det_file = os.path.join(res_dir, file_name)
        key = os.path.splitext(file_name)[0]
        detection = new_detection(key)
        fill_annotations(detection, ann[key])
        fill_detection(detection, det_file)

        object_detection_evaluator.add_single_ground_truth_image_info(
            detection[standard_fields.DetectionResultFields.key], detection)
        object_detection_evaluator.add_single_detected_image_info(
            detection[standard_fields.DetectionResultFields.key], detection)

    all_metrics = object_detection_evaluator.evaluate()
    mAP = all_metrics['OpenImagesV2_Precision/mAP@0.5IOU']

    return mAP, 0, all_metrics
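# get_annotations, new_detection, fill_annotations and fill_detection are project helpers
# that are not shown above. A hypothetical sketch of the dict shape new_detection might
# produce, using field names from object_detection.core.standard_fields:
import numpy as np
from object_detection.core import standard_fields

def new_detection(key):
    return {
        standard_fields.DetectionResultFields.key: key,
        standard_fields.InputDataFields.groundtruth_boxes: np.empty((0, 4)),
        standard_fields.InputDataFields.groundtruth_classes: np.empty(0, dtype=int),
        standard_fields.DetectionResultFields.detection_boxes: np.empty((0, 4)),
        standard_fields.DetectionResultFields.detection_scores: np.empty(0),
        standard_fields.DetectionResultFields.detection_classes: np.empty(0, dtype=int),
    }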
Example #8
def main(unused_argv):
    assert FLAGS.checkpoint_dir, '`checkpoint_dir` is missing.'
    assert FLAGS.eval_dir, '`eval_dir` is missing.'
    tf.gfile.MakeDirs(FLAGS.eval_dir)
    configs = config_util.get_configs_from_pipeline_file(
        FLAGS.pipeline_config_path)
    tf.gfile.Copy(FLAGS.pipeline_config_path,
                  os.path.join(FLAGS.eval_dir, 'pipeline.config'),
                  overwrite=True)

    model_config = configs['model']
    eval_config = configs['eval_config']
    input_config = configs['eval_input_config']
    if FLAGS.eval_training_data:
        input_config = configs['train_input_config']

    model_fn = functools.partial(model_builder.build,
                                 model_config=model_config,
                                 is_training=False)

    def get_next(config):
        return dataset_builder.make_initializable_iterator(
            dataset_builder.build(config)).get_next()

    create_input_dict_fn = functools.partial(get_next, input_config)

    categories = label_map_util.create_categories_from_labelmap(
        input_config.label_map_path)

    if FLAGS.run_once:
        eval_config.max_evals = 1

    graph_rewriter_fn = None
    if 'graph_rewriter_config' in configs:
        graph_rewriter_fn = graph_rewriter_builder.build(
            configs['graph_rewriter_config'], is_training=False)

    evaluator.evaluate(create_input_dict_fn,
                       model_fn,
                       eval_config,
                       categories,
                       FLAGS.checkpoint_dir,
                       FLAGS.eval_dir,
                       graph_hook_fn=graph_rewriter_fn)
Example #9
def load_labelmap(voc_path, labelmap_file):
    """
        Loads labelmap file and reads data to different data structures
    Args:
        voc_path: Path to used VOC dataset
        labelmap_file: String containing the Name of labelmap_file

    Returns:
        categories: a list of dictionaries representing all possible categories
        labelmap_dict: a dictionary mapping label names to id
        category_index: a category index, which is a dictionary that maps integer ids to dicts containing categories

    """
    labelmap_path = os.path.join(voc_path, labelmap_file)
    category_index = label_map_util.create_category_index_from_labelmap(
        labelmap_path)
    categories = label_map_util.create_categories_from_labelmap(labelmap_path)
    labelmap_dict = label_map_util.get_label_map_dict(labelmap_path)

    return categories, labelmap_dict, category_index
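# A hedged usage sketch, assuming a labelmap containing the items 'dog' (id 1) and 'cat' (id 2):
#
#   categories, labelmap_dict, category_index = load_labelmap('VOC2012', 'label_map.pbtxt')
#   # categories     -> [{'id': 1, 'name': 'dog'}, {'id': 2, 'name': 'cat'}]
#   # labelmap_dict  -> {'dog': 1, 'cat': 2}
#   # category_index -> {1: {'id': 1, 'name': 'dog'}, 2: {'id': 2, 'name': 'cat'}}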
Example #10
  def test_create_categories_from_labelmap(self):
    label_map_string = """
      item {
        id:1
        name:'dog'
      }
      item {
        id:2
        name:'cat'
      }
    """
    label_map_path = os.path.join(self.get_temp_dir(), 'label_map.pbtxt')
    with tf.gfile.Open(label_map_path, 'wb') as f:
      f.write(label_map_string)

    categories = label_map_util.create_categories_from_labelmap(label_map_path)
    self.assertListEqual([{
        'name': u'dog',
        'id': 1
    }, {
        'name': u'cat',
        'id': 2
    }], categories)
Example #12
train_record_path = current_dir + '/annotations/train.record'
test_record_path = current_dir + '/annotations/test.record'

#labelmap_name = 'labelmap_12especes.pbtxt' # if we have the MESCHA: TrainV2 to V4 and TrainV6
#labelmap_name = 'labelmap_sansMESCHA.pbtxt' # TrainV5 => TODO: make this a program parameter!
labelmap_name = 'labelmap_14especes.pbtxt'
labelmap_path = current_dir + '/annotations/' + labelmap_name  # the training labelmap as objects

#########################################################################
# Retrieve the labels for our classes
#########################################################################
# List of the strings that is used to add correct label for each box.
category_index = label_map_util.create_category_index_from_labelmap(labelmap_path, use_display_name=True)

# Get the labelmap (list of labels) to display them in the legend of plots
labelmap = label_map_util.create_categories_from_labelmap(labelmap_path, use_display_name=True)

# Retrieve the labels to display them in the plot legend
labels = [i['name'] for i in labelmap]

# Build the list of labels paired with their respective colors
handles = []
# Colors used in utils https://github.com/tensorflow/models/blob/master/research/object_detection/utils/visualization_utils.py#:~:text=_TITLE_TOP_MARGIN%20%3D%2010-,STANDARD_COLORS%20%3D%20%5B,-%27AliceBlue%27%2C%20%27Chartreuse%27%2C%20%27Aqua
for i in range(len(labels)):
  patch = mpatches.Patch(color=viz_utils.STANDARD_COLORS[i+1], label=labels[i])  # i+1 for the colors because the labelmap index starts at 1 (and the loop at 0)
  handles.append(patch)

# Add the color red for the labels/annotations/groundtruths:
handles.append(mpatches.Patch(color='red', label='Labels'))

#########################################################################
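# A minimal sketch of attaching these handles to a plot legend (the figure and the boxes
# drawn on it are hypothetical; only `handles` comes from the code above):
import matplotlib.pyplot as plt

fig, ax = plt.subplots()
# ... draw the image with its detections and groundtruth boxes on ax ...
ax.legend(handles=handles, loc='upper right')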
Example #13
    best_class_id = values[np.argmax(weights * mean_propabilities)]

    best_class_name = id_to_category_dict[best_class_id]['name']

    return best_class_name


if __name__ == '__main__':
    groundtruth_annotation_path = os.path.join(VOC_PATH, 'Annotations')
    prediction_annotation_path = os.path.join(PREDICTION_OUTPUT_PATH,
                                              'voc/Annotations')
    softvoting_path = os.path.join(PREDICTION_OUTPUT_PATH,
                                   'voc/Annotations_softvoting')

    category_name_to_index = label_map_util.get_label_map_dict(LABELMAP_PATH)
    id_to_category_list = label_map_util.create_categories_from_labelmap(
        LABELMAP_PATH)

    evaluator_base = CocoDetectionEvaluator(categories=id_to_category_list,
                                            include_metrics_per_category=True,
                                            all_metrics_per_category=False)
    evaluator_softvoting = CocoDetectionEvaluator(
        categories=id_to_category_list,
        include_metrics_per_category=True,
        all_metrics_per_category=False)

    for file in os.listdir(prediction_annotation_path):
        # add groundtruth from voc file to evaluator_base and evaluator_softvoting
        groundtruth_image_id, groundtruth_boxes, groundtruth_classes, groundtruth_scores = read_voc_for_detections(
            os.path.join(groundtruth_annotation_path, file))
        if groundtruth_classes.size == 0:
            groundtruth_classes_index = np.empty(groundtruth_classes.shape)
    """
    tf.logging.info('Writing metrics.')

    with open(os.path.join(output_dir, 'metrics.csv'), 'w') as csvfile:
        metrics_writer = csv.writer(csvfile, delimiter=',')
        for metric_name, metric_value in metrics.items():
            metrics_writer.writerow([metric_name, str(metric_value)])


# FILENAME = "camera_data/training/training.record-00000-of-00075"
FILENAME = "../../old/predictions_v0/initial_crop_size_28/validation_detections.tfrecord-00000-of-00001"
data_parser = tf_example_parser.TfExampleDetectionAndGTParser()
dataset = tf.data.TFRecordDataset(FILENAME, compression_type='')

# serialized_example = next(iter(dataset))
categories = label_map_util.create_categories_from_labelmap(
    "../../data/camera_data/label_map.pbtxt")

eval_config = eval_pb2.EvalConfig()
eval_config.metrics_set.extend(['coco_detection_metrics'])
# Per category metrics not working
# eval_config.include_metrics_per_category = True

evaluator_options = evaluator_options_from_eval_config(eval_config)
object_detection_evaluators = get_evaluators(eval_config, categories,
                                             evaluator_options)
object_detection_evaluator = object_detection_evaluators[0]


def scale_boxes_to_absolute_coordinates(decoded_dict):
    def _scale_box_to_absolute(args):
        boxes, height, width = args
start_time = time.time()

# Load saved model and build the detection function
detect_fn = tf.saved_model.load(PATH_TO_SAVED_MODEL)

end_time = time.time()
elapsed_time = end_time - start_time
print('Done! Took {} seconds'.format(elapsed_time))

# Load label map data (for plotting)
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
category_index = label_map_util.create_category_index_from_labelmap(PATH_TO_LABELS,
                                                                    use_display_name=True)

# Get the labelmap (list of labels) to display them in the legend of plots
labelmap = label_map_util.create_categories_from_labelmap(PATH_TO_LABELS, use_display_name=True)

# Retrieve the labels to display them in the plot legend
labels = [i['name'] for i in labelmap]
print('Running inference for {}... '.format(VIDEO_PATHS), end='')

# Video-specific variables
video_name = os.path.basename(VIDEO_PATHS)
video = cv2.VideoCapture(VIDEO_PATHS)
fps = video.get(cv2.CAP_PROP_FPS)
number_of_frames = int(video.get(cv2.CAP_PROP_FRAME_COUNT))
print('\nFrames per second (FPS): ', fps)
print('Total number of frames: ', number_of_frames)
print('Video duration: ', number_of_frames/fps, ' seconds')

# Folder to save the frames (create it if it doesn't exist):
Example #16
def main(unused_argv):
    assert FLAGS.checkpoint_dir, '`checkpoint_dir` is missing.'
    assert FLAGS.eval_dir, '`eval_dir` is missing.'
    tf.gfile.MakeDirs(FLAGS.eval_dir)
    if FLAGS.pipeline_config_path:
        configs = config_util.get_configs_from_pipeline_file(
            FLAGS.pipeline_config_path)
        tf.gfile.Copy(FLAGS.pipeline_config_path,
                      os.path.join(FLAGS.eval_dir, 'pipeline.config'),
                      overwrite=True)
    else:
        configs = config_util.get_configs_from_multiple_files(
            model_config_path=FLAGS.model_config_path,
            eval_config_path=FLAGS.eval_config_path,
            eval_input_config_path=FLAGS.input_config_path)
        for name, config in [('model.config', FLAGS.model_config_path),
                             ('eval.config', FLAGS.eval_config_path),
                             ('input.config', FLAGS.input_config_path)]:
            tf.gfile.Copy(config,
                          os.path.join(FLAGS.eval_dir, name),
                          overwrite=True)

    model_config = configs['model']
    eval_config = configs['eval_config']
    input_config = configs['eval_input_config']
    if FLAGS.eval_training_data:
        input_config = configs['train_input_config']

    model_fn = functools.partial(model_builder.build,
                                 model_config=model_config,
                                 is_training=False)

    def get_next(config):
        return dataset_builder.make_initializable_iterator(
            dataset_builder.build(config)).get_next()

    create_input_dict_fn = functools.partial(get_next, input_config)

    categories = label_map_util.create_categories_from_labelmap(
        input_config.label_map_path)

    if FLAGS.run_once:
        eval_config.max_evals = 1

    graph_rewriter_fn = None
    if 'graph_rewriter_config' in configs:
        graph_rewriter_fn = graph_rewriter_builder.build(
            configs['graph_rewriter_config'], is_training=False)

    metrics_dict = evaluator.evaluate(create_input_dict_fn,
                                      model_fn,
                                      eval_config,
                                      categories,
                                      FLAGS.checkpoint_dir,
                                      FLAGS.eval_dir,
                                      graph_hook_fn=graph_rewriter_fn)

    with open(FLAGS.output_json_path, 'w') as op_json_file:
        temp_dict = {}
        for key, value in metrics_dict.items():
            temp_dict[key] = str(value)

        json.dump(temp_dict, op_json_file)
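# A hedged follow-up sketch: reading the metrics JSON back in, assuming FLAGS.output_json_path
# pointed at a file named 'metrics.json' (hypothetical path):
import json

with open('metrics.json') as metrics_file:
    metrics = json.load(metrics_file)
for name, value in sorted(metrics.items()):
    print('{}: {}'.format(name, value))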