Example #1
0
    def _process_batch(tensor_dict,
                       sess,
                       batch_index,
                       counters,
                       losses_dict=None):
        """Evaluates tensors in tensor_dict, losses_dict and visualizes examples.

    This function calls sess.run on tensor_dict, evaluating the original_image
    tensor only on the first K examples and visualizing detections overlaid
    on this original_image.

    Args:
      tensor_dict: a dictionary of tensors
      sess: tensorflow session
      batch_index: the index of the batch amongst all batches in the run.
      counters: a dictionary holding 'success' and 'skipped' fields which can
        be updated to keep track of number of successful and failed runs,
        respectively.  If these fields are not updated, then the success/skipped
        counter values shown at the end of evaluation will be incorrect.
      losses_dict: Optional dictionary of scalar loss tensors.

    Returns:
      result_dict: a dictionary of numpy arrays
      result_losses_dict: a dictionary of scalar losses. This is empty if input
        losses_dict is None.
    """
        try:
            if not losses_dict:
                losses_dict = {}
            result_dict, result_losses_dict = sess.run(
                [tensor_dict, losses_dict])
            counters['success'] += 1
        except tf.errors.InvalidArgumentError:
            logging.info('Skipping image')
            counters['skipped'] += 1
            return {}, {}
        global_step = tf.compat.v1.train.global_step(
            sess, tf.compat.v1.train.get_global_step())
        if batch_index < eval_config.num_visualizations:
            tag = 'image-{}'.format(batch_index)
            eval_util.visualize_detection_results(
                result_dict,
                tag,
                global_step,
                categories=categories,
                summary_dir=eval_dir,
                export_dir=eval_config.visualization_export_dir,
                show_groundtruth=eval_config.visualize_groundtruth_boxes,
                groundtruth_box_visualization_color=(
                    eval_config.groundtruth_box_visualization_color),
                min_score_thresh=eval_config.min_score_threshold,
                max_num_predictions=eval_config.max_num_boxes_to_visualize,
                skip_scores=eval_config.skip_scores,
                skip_labels=eval_config.skip_labels,
                keep_image_id_for_visualization_export=(
                    eval_config.keep_image_id_for_visualization_export))
        return result_dict, result_losses_dict
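
In the TF Object Detection API, a `_process_batch` like the one above is typically handed to `eval_util` as the batch-processor callback. The sketch below is a minimal, hypothetical driver loop (not taken from the example's source): it assumes a callback with the signature above, an open `tf.Session` named `sess`, and an already-built `tensor_dict`, and shows how the `'success'`/`'skipped'` counters and the empty-dict "skipped" convention from the docstring are meant to be consumed.

# Minimal driver sketch (assumed usage, not from the original source).
import logging


def run_eval_loop(process_batch_fn, sess, tensor_dict, num_batches,
                  losses_dict=None):
  # Counters are shared with the callback so it can record outcomes.
  counters = {'success': 0, 'skipped': 0}
  results = []
  for batch_index in range(num_batches):
    result_dict, result_losses_dict = process_batch_fn(
        tensor_dict, sess, batch_index, counters, losses_dict=losses_dict)
    if not result_dict:  # an empty dict signals a skipped batch
      continue
    results.append((result_dict, result_losses_dict))
  logging.info('Evaluation done: %d successful, %d skipped',
               counters['success'], counters['skipped'])
  return results
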
Example #2
0
  def _process_batch(tensor_dict, sess, batch_index, counters,
                     losses_dict=None):
    """Evaluates tensors in tensor_dict, losses_dict and visualizes examples.

    This function calls sess.run on tensor_dict, evaluating the original_image
    tensor only on the first K examples and visualizing detections overlaid
    on this original_image.

    Args:
      tensor_dict: a dictionary of tensors
      sess: tensorflow session
      batch_index: the index of the batch amongst all batches in the run.
      counters: a dictionary holding 'success' and 'skipped' fields which can
        be updated to keep track of number of successful and failed runs,
        respectively.  If these fields are not updated, then the success/skipped
        counter values shown at the end of evaluation will be incorrect.
      losses_dict: Optional dictionary of scalar loss tensors.

    Returns:
      result_dict: a dictionary of numpy arrays
      result_losses_dict: a dictionary of scalar losses. This is empty if input
        losses_dict is None.
    """
    try:
      if not losses_dict:
        losses_dict = {}
      result_dict, result_losses_dict = sess.run([tensor_dict, losses_dict])
      counters['success'] += 1
    except tf.errors.InvalidArgumentError:
      logging.info('Skipping image')
      counters['skipped'] += 1
      return {}, {}
    global_step = tf.train.global_step(sess, tf.train.get_global_step())
    if batch_index < eval_config.num_visualizations:
      tag = 'image-{}'.format(batch_index)
      eval_util.visualize_detection_results(
          result_dict,
          tag,
          global_step,
          categories=categories,
          summary_dir=eval_dir,
          export_dir=eval_config.visualization_export_dir,
          show_groundtruth=eval_config.visualize_groundtruth_boxes,
          groundtruth_box_visualization_color=(
              eval_config.groundtruth_box_visualization_color),
          min_score_thresh=eval_config.min_score_threshold,
          max_num_predictions=eval_config.max_num_boxes_to_visualize,
          skip_scores=eval_config.skip_scores,
          skip_labels=eval_config.skip_labels,
          keep_image_id_for_visualization_export=(
              eval_config.keep_image_id_for_visualization_export))
    return result_dict, result_losses_dict
Example #3
0
    def _process_batch(tensor_dict, sess, batch_index, counters, update_op):
        """Evaluates tensors in tensor_dict, visualizing the first K examples.

    This function calls sess.run on tensor_dict, evaluating the original_image
    tensor only on the first K examples and visualizing detections overlaid
    on this original_image.

    Args:
      tensor_dict: a dictionary of tensors
      sess: tensorflow session
      batch_index: the index of the batch amongst all batches in the run.
      counters: a dictionary holding 'success' and 'skipped' fields which can
        be updated to keep track of number of successful and failed runs,
        respectively.  If these fields are not updated, then the success/skipped
        counter values shown at the end of evaluation will be incorrect.
      update_op: An update op that has to be run along with output tensors. For
        example this could be an op to compute statistics for slim metrics.

    Returns:
      result_dict: a dictionary of numpy arrays
    """
        def delete_key(dic, keys):
            for key in keys:
                if key in dic.keys():
                    del dic[key]

        b_visualize = batch_index < eval_config.num_visualizations

        try:
            out = sess.run([tensor_dict, update_op])
            result_dict = out[0]
            image_id = result_dict['image_id']
            tag = image_id

            counters['success'] += 1
        except tf.errors.InvalidArgumentError:
            logging.info('Skipping image')
            counters['skipped'] += 1
            return {}
        global_step = tf.train.global_step(sess, slim.get_global_step())
        if b_visualize:
            eval_util.visualize_detection_results(
                result_dict,
                tag,
                global_step,
                categories=categories,
                summary_dir=eval_dir,
                export_dir=eval_config.visualization_export_dir,
                show_groundtruth=True)
        return result_dict
Example #4
0
    def _process_batch(tensor_dict, sess, batch_index, counters, update_op):
        """Evaluates tensors in tensor_dict, visualizing the first K examples.

    This function calls sess.run on tensor_dict, evaluating the original_image
    tensor only on the first K examples and visualizing detections overlaid
    on this original_image.

    Args:
      tensor_dict: a dictionary of tensors
      sess: tensorflow session
      batch_index: the index of the batch amongst all batches in the run.
      counters: a dictionary holding 'success' and 'skipped' fields which can
        be updated to keep track of number of successful and failed runs,
        respectively.  If these fields are not updated, then the success/skipped
        counter values shown at the end of evaluation will be incorrect.
      update_op: An update op that has to be run along with output tensors. For
        example this could be an op to compute statistics for slim metrics.

    Returns:
      result_dict: a dictionary of numpy arrays
    """
        duration = time.time() - start
        print(batch_index, ": %.1f" % duration)

        if batch_index >= eval_config.num_visualizations:
            if 'original_image' in tensor_dict:
                tensor_dict = {
                    k: v
                    for (k, v) in tensor_dict.items() if k != 'original_image'
                }
        try:
            (result_dict, _) = sess.run([tensor_dict, update_op])
            counters['success'] += 1
        except tf.errors.InvalidArgumentError:
            logging.info('Skipping image')
            counters['skipped'] += 1
            return {}
        global_step = tf.train.global_step(sess, slim.get_global_step())
        if batch_index < eval_config.num_visualizations:
            tag = 'image-{}'.format(batch_index)
            eval_util.visualize_detection_results(
                result_dict,
                tag,
                global_step,
                categories=categories,
                summary_dir=eval_dir,
                export_dir=eval_config.visualization_export_dir,
                show_groundtruth=eval_config.visualize_groundtruth_boxes)
        return result_dict
Example #5
0
  def _process_batch(tensor_dict, sess, batch_index, counters, update_op):
    """Evaluates tensors in tensor_dict, visualizing the first K examples.

    This function calls sess.run on tensor_dict, evaluating the original_image
    tensor only on the first K examples and visualizing detections overlaid
    on this original_image.

    Args:
      tensor_dict: a dictionary of tensors
      sess: tensorflow session
      batch_index: the index of the batch amongst all batches in the run.
      counters: a dictionary holding 'success' and 'skipped' fields which can
        be updated to keep track of number of successful and failed runs,
        respectively.  If these fields are not updated, then the success/skipped
        counter values shown at the end of evaluation will be incorrect.
      update_op: An update op that has to be run along with output tensors. For
        example this could be an op to compute statistics for slim metrics.

    Returns:
      result_dict: a dictionary of numpy arrays
    """
    if batch_index >= eval_config.num_visualizations:
      if 'original_image' in tensor_dict:
        tensor_dict = {k: v for (k, v) in tensor_dict.items()
                       if k != 'original_image'}
    try:
      (result_dict, _) = sess.run([tensor_dict, update_op])
      counters['success'] += 1
    except tf.errors.InvalidArgumentError:
      logging.info('Skipping image')
      counters['skipped'] += 1
      return {}
    global_step = tf.train.global_step(sess, slim.get_global_step())
    if batch_index < eval_config.num_visualizations:
      tag = 'image-{}'.format(batch_index)
      eval_util.visualize_detection_results(
          result_dict, tag, global_step, categories=categories,
          summary_dir=eval_dir,
          export_dir=eval_config.visualization_export_dir,
          show_groundtruth=eval_config.visualize_groundtruth_boxes)
    return result_dict
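
The snippet above drops the (potentially large) `original_image` entry from `tensor_dict` for batches past `num_visualizations`, so the image tensor is never fetched when it will not be visualized. Factored out as a standalone helper (an illustration, not code from the example), that step looks like this:

def maybe_drop_original_image(tensor_dict, batch_index, num_visualizations):
  # Return tensor_dict unchanged for batches that will be visualized;
  # otherwise return a copy without 'original_image' so that sess.run
  # never has to fetch the full image tensor.
  if batch_index >= num_visualizations and 'original_image' in tensor_dict:
    return {k: v for (k, v) in tensor_dict.items() if k != 'original_image'}
  return tensor_dict
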
Example #6
0
    def _process_batch(tensor_dict, sess, batch_index, counters):
        """Evaluates tensors in tensor_dict, visualizing the first K examples.

    This function calls sess.run on tensor_dict, evaluating the original_image
    tensor only on the first K examples and visualizing detections overlaid
    on this original_image.

    Args:
      tensor_dict: a dictionary of tensors
      sess: tensorflow session
      batch_index: the index of the batch amongst all batches in the run.
      counters: a dictionary holding 'success' and 'skipped' fields which can
        be updated to keep track of number of successful and failed runs,
        respectively.  If these fields are not updated, then the success/skipped
        counter values shown at the end of evaluation will be incorrect.

    Returns:
      result_dict: a dictionary of numpy arrays
    """
        try:
            result_dict = sess.run(tensor_dict)
            counters['success'] += 1
        except tf.errors.InvalidArgumentError:
            logging.info('Skipping image')
            counters['skipped'] += 1
            return {}
        global_step = tf.train.global_step(sess, tf.train.get_global_step())
        if batch_index < eval_config.num_visualizations:
            tag = 'image-{}'.format(batch_index)
            eval_util.visualize_detection_results(
                result_dict,
                tag,
                global_step,
                categories=categories,
                summary_dir=eval_dir,
                export_dir=eval_config.visualization_export_dir,
                show_groundtruth=False,
                max_num_predictions=None)
        return result_dict
Example #7
0
def eval_wrapper(original_image, filename, det_boxes, det_scores, det_classes,
                 det_transcriptions, gt_boxes, gt_classes, gt_transcriptions,
                 global_step):

  tensor_dict = {}
  tensor_dict['original_image'] = original_image
  tensor_dict['filename'] = filename
  tensor_dict['detection_boxes'] = det_boxes
  tensor_dict['detection_scores'] = det_scores
  tensor_dict['detection_classes'] = det_classes
  tensor_dict['detection_transcriptions'] = det_transcriptions
  tensor_dict['groundtruth_boxes'] = gt_boxes
  tensor_dict['groundtruth_classes'] = gt_classes
  tensor_dict['groundtruth_transcriptions'] = gt_transcriptions
  tensor_dict['image_id'] = 'aaa'

  print(gt_transcriptions)
  gt_transcriptions_str = []
  for a in gt_transcriptions:
    gt_transcriptions_str += ["".join([chr(item) for item in a if item > 0])]
  print(gt_transcriptions_str)

  print(det_transcriptions)
  det_transcriptions_str = []
  for a in det_transcriptions:
    det_transcriptions_str += ["".join([chr(item) for item in a if item > 0])]
  print(det_transcriptions_str)

  print('')
  print('eval wrapper')
  print(filename)
  print(original_image.shape)
  print(det_boxes.shape)
  print(det_scores.shape)
  print(det_classes.shape)
  print(det_transcriptions.shape)
  print(gt_boxes.shape)
  print(gt_classes.shape)
  print(gt_transcriptions.shape)
  print(global_step)
  sys.stdout.flush()

  categories = [{'id': 0, 'name': 'background'}, {'id': 1, 'name': 'text'}]
  
  eval_util.visualize_detection_results(
      tensor_dict,
      'tag' + str(global_step),
      global_step,
      categories=categories,
      summary_dir='/home/zbychuj/Desktop/models/object_detection/models/eval',
      export_dir='/home/zbychuj/Desktop/models/object_detection/models/eval',
      show_groundtruth=True,
      max_num_predictions=100000,
      min_score_thresh=.5,
      gt_transcriptions=gt_transcriptions_str,
      det_transcriptions=det_transcriptions_str)

  #f = open('/home/zbychuj/Desktop/test_results/' + filename + '.txt', 'w')
  #for i in range(0, 64):
  #  f.write(str(det_scores[i]) + ',' + str(det_boxes[i][0]) + ',' + str(det_boxes[i][1]) + ',' + str(det_boxes[i][2]) + ',' + str(det_boxes[i][3]) + ',')
  #  f.write(str(det_transcriptions[i]) + '\n')
  #f.close()
  
  tensor_dict = {}
  tensor_dict['detection_boxes'] = [det_boxes]
  tensor_dict['detection_scores'] = [det_scores]
  tensor_dict['detection_classes'] = [det_classes]
  tensor_dict['groundtruth_boxes'] = [gt_boxes]
  tensor_dict['groundtruth_classes'] = [gt_classes]
  tensor_dict['image_id'] = ['aaa']
  #metrics = eval_util.evaluate_detection_results_pascal_voc(tensor_dict, categories, label_id_offset=1)
  #mAP = metrics['Precision/mAP@0.5IOU']
  #print(mAP)

  return float(global_step)
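
The example above rebuilds the transcription strings inline by mapping character codes through `chr()` and dropping zero padding. A small hypothetical helper (not part of the original code) that factors out this decoding step:

def decode_transcriptions(transcriptions):
  # Each row is a zero-padded sequence of character codes; drop the padding
  # and convert the remaining codes back into a Python string.
  return ["".join(chr(int(code)) for code in row if code > 0)
          for row in transcriptions]


# e.g. decode_transcriptions([[72, 105, 0, 0], [84, 70, 0, 0]]) -> ['Hi', 'TF']
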
Example #8
0
    def _process_batch(tensor_dict,
                       sess,
                       batch_index,
                       counters,
                       losses_dict=None):
        """Evaluates tensors in tensor_dict, losses_dict and visualizes examples.

    This function calls sess.run on tensor_dict, evaluating the original_image
    tensor only on the first K examples and visualizing detections overlaid
    on this original_image.

    Args:
      tensor_dict: a dictionary of tensors
      sess: tensorflow session
      batch_index: the index of the batch amongst all batches in the run.
      counters: a dictionary holding 'success' and 'skipped' fields which can
        be updated to keep track of number of successful and failed runs,
        respectively.  If these fields are not updated, then the success/skipped
        counter values shown at the end of evaluation will be incorrect.
      losses_dict: Optional dictionary of scalar loss tensors.

    Returns:
      result_dict: a dictionary of numpy arrays
      result_losses_dict: a dictionary of scalar losses. This is empty if input
        losses_dict is None.
    """
        try:
            if not losses_dict:
                losses_dict = {}
            # testing: feature-map visualization experiment, left commented out.
            #   # tensor_dict = [n.name for n in tf.get_default_graph().as_graph_def().node]
            # test_layer = tf.get_default_graph().get_tensor_by_name('SecondStageFeatureExtractor/InceptionV2/Mixed_5b/Branch_1/Conv2d_0a_1x1/Relu:0')
            # test_layer_dict = {"test_layer":test_layer}
            # feature_dict, result_dict, result_losses_dict = sess.run([test_layer, tensor_dict, losses_dict])
            # # feature_dict = sess.run([test_layer, tensor_dict])
            # import numpy as np
            # import scipy.misc
            # feature_tensor = feature_dict[0]
            # origin_image = result_dict['original_image']
            # feature_map = np.sum(feature_tensor,axis=(2))
            # # feature_map /= feature_map.max()
            # # feature_map = 1 - feature_map
            # # rgb_feature_map = np.zeros((256, 256, 3))
            # # rgb_feature_map[..., 0] = feature_map * 30
            # # rgb_feature_map[..., 1] = feature_map * 144
            # # rgb_feature_map[..., 2] = feature_map * 255
            # # rgb_feature_map[..., 0] = np.where(feature_map==0, 30, 255)
            # # rgb_feature_map[..., 1] = np.where(feature_map==0, 144, 255)
            # # rgb_feature_map[..., 2] = np.where(feature_map==0, 255, 255)
            # origin_image  = origin_image[0]
            # # origin_image[..., 0] = np.where(origin_image[..., 0]==0, 30, 255)
            # # origin_image[..., 1] = np.where(origin_image[..., 1]==0, 144, 255)
            # # origin_image[..., 2] = np.where(origin_image[..., 2]==0, 255, 255)
            # scipy.misc.imsave('feature_map_{}.jpg'.format(counters['success']), feature_map)
            # # scipy.misc.imsave('origin_image_{}.jpg'.format(counters['success']), origin_image)
            # # from IPython import embed;embed()
            result_dict, result_losses_dict = sess.run(
                [tensor_dict, losses_dict])
            counters['success'] += 1
        except tf.errors.InvalidArgumentError:
            logging.info('Skipping image')
            counters['skipped'] += 1
            return {}, {}
        global_step = tf.train.global_step(sess, tf.train.get_global_step())
        if batch_index < eval_config.num_visualizations:
            tag = 'image-{}'.format(batch_index)
            eval_util.visualize_detection_results(
                eval_config.num_visualizations,
                result_dict,
                tag,
                global_step,
                categories=categories,
                summary_dir=eval_dir,
                export_dir=eval_config.visualization_export_dir,
                show_groundtruth=eval_config.visualize_groundtruth_boxes,
                groundtruth_box_visualization_color=(
                    eval_config.groundtruth_box_visualization_color),
                min_score_thresh=eval_config.min_score_threshold,
                max_num_predictions=eval_config.max_num_boxes_to_visualize,
                skip_scores=eval_config.skip_scores,
                skip_labels=eval_config.skip_labels,
                num_examples=eval_config.num_examples,
                keep_image_id_for_visualization_export=(
                    eval_config.keep_image_id_for_visualization_export))

        return result_dict, result_losses_dict