Example #1
    def _process_batch(tensor_dict,
                       sess,
                       batch_index,
                       counters,
                       losses_dict=None):
        """Evaluates tensors in tensor_dict, losses_dict and visualizes examples.

        This function calls sess.run on tensor_dict, evaluating the original_image
        tensor only on the first K examples and visualizing detections overlaid
        on this original_image.

        Args:
          tensor_dict: a dictionary of tensors
          sess: tensorflow session
          batch_index: the index of the batch amongst all batches in the run.
          counters: a dictionary holding 'success' and 'skipped' fields which can
            be updated to keep track of number of successful and failed runs,
            respectively.  If these fields are not updated, then the success/skipped
            counter values shown at the end of evaluation will be incorrect.
          losses_dict: Optional dictionary of scalar loss tensors.

        Returns:
          result_dict: a dictionary of numpy arrays
          result_losses_dict: a dictionary of scalar losses. This is empty if input
            losses_dict is None.
        """
        try:
            if not losses_dict:
                losses_dict = {}
            result_dict, result_losses_dict = sess.run(
                [tensor_dict, losses_dict])
            counters['success'] += 1
        except tf.errors.InvalidArgumentError:
            logging.info('Skipping image')
            counters['skipped'] += 1
            return {}, {}
        global_step = tf.train.global_step(sess, tf.train.get_global_step())
        if batch_index < eval_config.num_visualizations:
            tag = 'image-{}'.format(batch_index)
            eval_util.visualize_detection_results(
                result_dict,
                tag,
                global_step,
                categories=categories,
                summary_dir=eval_dir,
                export_dir=eval_config.visualization_export_dir,
                show_groundtruth=eval_config.visualize_groundtruth_boxes,
                groundtruth_box_visualization_color=eval_config.
                groundtruth_box_visualization_color,
                min_score_thresh=eval_config.min_score_threshold,
                max_num_predictions=eval_config.max_num_boxes_to_visualize,
                skip_scores=eval_config.skip_scores,
                skip_labels=eval_config.skip_labels,
                keep_image_id_for_visualization_export=eval_config.
                keep_image_id_for_visualization_export)
        return result_dict, result_losses_dict
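
In the Object Detection API's evaluator.py this nested helper is not called directly; it is handed to eval_util as the per-batch callback of the evaluation loop. The following is only a hypothetical, minimal driver sketch of that contract, assuming tensor_dict, losses_dict and eval_config already exist in the enclosing scope exactly as in the example above; checkpoint restoration and input-pipeline setup are omitted:

    counters = {'success': 0, 'skipped': 0}
    per_batch_losses = []
    with tf.Session() as sess:
        sess.run(tf.local_variables_initializer())
        for batch_index in range(eval_config.num_examples):
            result_dict, result_losses_dict = _process_batch(
                tensor_dict, sess, batch_index, counters,
                losses_dict=losses_dict)
            if not result_dict:
                continue  # batch was skipped after an InvalidArgumentError
            per_batch_losses.append(result_losses_dict)
    logging.info('Evaluated %d batches, skipped %d',
                 counters['success'], counters['skipped'])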
Example #2
    def _process_batch(tensor_dict, sess, batch_index, counters, update_op):
        """Evaluates tensors in tensor_dict, visualizing the first K examples.

        This function calls sess.run on tensor_dict, evaluating the original_image
        tensor only on the first K examples and visualizing detections overlaid
        on this original_image.

        Args:
          tensor_dict: a dictionary of tensors
          sess: tensorflow session
          batch_index: the index of the batch amongst all batches in the run.
          counters: a dictionary holding 'success' and 'skipped' fields which can
            be updated to keep track of number of successful and failed runs,
            respectively.  If these fields are not updated, then the success/skipped
            counter values shown at the end of evaluation will be incorrect.
          update_op: An update op that has to be run along with output tensors. For
            example this could be an op to compute statistics for slim metrics.

        Returns:
          result_dict: a dictionary of numpy arrays
        """
        if batch_index >= eval_config.num_visualizations:
            if 'original_image' in tensor_dict:
                tensor_dict = {
                    k: v
                    for (k, v) in tensor_dict.items() if k != 'original_image'
                }
        try:
            (result_dict, _) = sess.run([tensor_dict, update_op])
            counters['success'] += 1
        except tf.errors.InvalidArgumentError:
            logging.info('Skipping image')
            counters['skipped'] += 1
            return {}
        global_step = tf.train.global_step(sess, slim.get_global_step())
        if batch_index < eval_config.num_visualizations:
            tag = 'image-{}'.format(batch_index)
            eval_util.visualize_detection_results(
                result_dict,
                tag,
                global_step,
                categories=categories,
                summary_dir=eval_dir,
                export_dir=eval_config.visualization_export_dir,
                show_groundtruth=eval_config.visualize_groundtruth_boxes)
        return result_dict
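
The update_op in Example #2 is typically the grouped update op of TF-Slim streaming metrics: each batch runs the update ops alongside the detection fetches, and the value ops are read once after the loop. A rough sketch of that construction, where total_loss stands in for any scalar tensor from the evaluation graph (the name is illustrative only):

    names_to_values, names_to_updates = slim.metrics.aggregate_metric_map({
        'Losses/TotalLoss': slim.metrics.streaming_mean(total_loss),
    })
    update_op = tf.group(*names_to_updates.values())
    # update_op is run with every batch by _process_batch; the tensors in
    # names_to_values are evaluated once after all batches have been processed.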
Example #3
    def _process_batch(tensor_dict,
                       sess,
                       batch_index,
                       counters,
                       losses_dict=None):
        """Evaluates tensors in tensor_dict, losses_dict and visualizes examples.

        This function calls sess.run on tensor_dict, evaluating the original_image
        tensor only on the first K examples and visualizing detections overlaid
        on this original_image.

        Args:
          tensor_dict: a dictionary of tensors
          sess: tensorflow session
          batch_index: the index of the batch amongst all batches in the run.
          counters: a dictionary holding 'success' and 'skipped' fields which can
            be updated to keep track of number of successful and failed runs,
            respectively.  If these fields are not updated, then the success/skipped
            counter values shown at the end of evaluation will be incorrect.
          losses_dict: Optional dictionary of scalar loss tensors.

        Returns:
          result_dict: a dictionary of numpy arrays
          result_losses_dict: a dictionary of scalar losses. This is empty if input
            losses_dict is None.
        """
        try:
            if not losses_dict:
                losses_dict = {}
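            # Set trace to True to capture a Chrome trace of the first batch
            # (requires `from tensorflow.python.client import timeline`).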
            trace = False
            if batch_index == 0 and trace:
                run_options = tf.RunOptions(
                    trace_level=tf.RunOptions.FULL_TRACE)
                run_metadata = tf.RunMetadata()
            else:
                run_options = None
                run_metadata = None
            start_time = time.time()
            result_dict, result_losses_dict = sess.run(
                [tensor_dict, losses_dict],
                options=run_options,
                run_metadata=run_metadata)
            if batch_index % 100 == 0:
                logging.info('Step %d: %.3f sec', batch_index,
                             time.time() - start_time)
            if batch_index == 0 and trace:
                fetched_timeline = timeline.Timeline(
                    step_stats=run_metadata.step_stats)
                trace_dir = 'logs'
                if not os.path.exists(trace_dir):
                    os.makedirs(trace_dir)
                trace_path = os.path.join(
                    trace_dir,
                    'rfcn-timeline-' + time.strftime('%Y%m%d-%H%M%S') + '.json')
                with open(trace_path, 'w') as trace_file:
                    trace_file.write(
                        fetched_timeline.generate_chrome_trace_format(
                            show_memory=False))
            counters['success'] += 1
        except tf.errors.InvalidArgumentError:
            logging.info('Skipping image')
            counters['skipped'] += 1
            return {}, {}
        global_step = tf.train.global_step(sess, tf.train.get_global_step())
        if batch_index < eval_config.num_visualizations:
            tag = 'image-{}'.format(batch_index)
            eval_util.visualize_detection_results(
                result_dict,
                tag,
                global_step,
                categories=categories,
                summary_dir=eval_dir,
                export_dir=eval_config.visualization_export_dir,
                show_groundtruth=eval_config.visualize_groundtruth_boxes,
                groundtruth_box_visualization_color=eval_config.
                groundtruth_box_visualization_color,
                min_score_thresh=eval_config.min_score_threshold,
                max_num_predictions=eval_config.max_num_boxes_to_visualize,
                skip_scores=eval_config.skip_scores,
                skip_labels=eval_config.skip_labels,
                keep_image_id_for_visualization_export=eval_config.
                keep_image_id_for_visualization_export)
        return result_dict, result_losses_dict
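
Example #3 is Example #1 plus optional profiling: when trace is set to True, the first sess.run is executed with FULL_TRACE options and the collected step stats are written out as a Chrome trace. The same pattern in isolation looks roughly like this, with sess and fetches standing in for whatever graph is being profiled; the resulting JSON file can be opened in chrome://tracing:

    from tensorflow.python.client import timeline

    run_options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
    run_metadata = tf.RunMetadata()
    sess.run(fetches, options=run_options, run_metadata=run_metadata)
    chrome_trace = timeline.Timeline(
        step_stats=run_metadata.step_stats).generate_chrome_trace_format()
    with open('timeline.json', 'w') as f:
        f.write(chrome_trace)  # load this file in chrome://tracing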