Code Example #1
    def after_run(self, run_context, run_values):
        results = run_values.results
        global_step = results.get('global_step')

        if self._draw_images:
            self._timer.update_last_triggered_step(global_step)
            prediction_dict = results.get('prediction_dict')
            if prediction_dict is not None:
                summaries = image_vis_summaries(prediction_dict,
                                                with_rcnn=self._with_rcnn)
                for summary in summaries:
                    self._summary_writer.add_summary(summary, global_step)

        self._next_step = global_step + 1
Code Example #2
    def after_run(self, run_context, run_values):
        results = run_values.results
        global_step = results.get('global_step')

        if self._draw_images:
            self._timer.update_last_triggered_step(global_step)
            prediction_dict = results.get('prediction_dict')
            if prediction_dict is not None:
                summaries = image_vis_summaries(
                    prediction_dict,
                    config=self._config,
                    image_visualization_mode=self._image_visualization_mode,
                    image=results.get('image'),
                    gt_bboxes=results.get('gt_bboxes'))
                if self._summary_writer is not None:
                    for summary in summaries:
                        self._summary_writer.add_summary(summary, global_step)

        self._next_step = global_step + 1
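
Both `after_run` variants above read `global_step` and `prediction_dict` out of `run_values.results`, which in a `tf.train.SessionRunHook` only contains whatever the paired `before_run` requested via `SessionRunArgs`. The skeleton below is a minimal sketch of that pairing (TensorFlow 1.x API; the class name, constructor, and attribute wiring are illustrative, not the project's actual hook):

import tensorflow as tf  # TensorFlow 1.x


class ImageVisHookSketch(tf.train.SessionRunHook):
    """Minimal skeleton showing how the after_run methods above get their data."""

    def __init__(self, prediction_dict, every_n_steps=100):
        self._prediction_dict = prediction_dict  # model output tensors
        self._timer = tf.train.SecondOrStepTimer(every_steps=every_n_steps)
        self._next_step = None
        self._draw_images = False
        self._summary_writer = None  # set elsewhere, e.g. in begin()

    def before_run(self, run_context):
        # Only fetch the (potentially large) prediction tensors on steps where
        # the timer triggers; after_run sees exactly what is requested here.
        self._draw_images = (
            self._next_step is None
            or self._timer.should_trigger_for_step(self._next_step)
        )
        fetches = {'global_step': tf.train.get_global_step()}
        if self._draw_images:
            fetches['prediction_dict'] = self._prediction_dict
        return tf.train.SessionRunArgs(fetches)

In TensorFlow 1.x such a hook would typically be passed through the `hooks` argument of `tf.train.MonitoredTrainingSession`.
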
Code Example #3
File: eval.py    Project: czbiohub/luminoth
def evaluate_once(
    config,
    writer,
    saver,
    ops,
    checkpoint,
    class_labels,
    metrics_scope="metrics",
    image_vis=None,
    files_per_class=None,
    files_to_visualize=None,
):
    """Run the evaluation once.

    Create a new session with the previously-built graph, run it through the
    dataset, calculate the evaluation metrics and write the corresponding
    summaries.

    Args:
        config: Config object for the model.
        writer: Summary writer.
        saver: Saver object to restore checkpoint parameters.
        ops (dict): All the operations needed to successfully run the model.
            Expects the following keys: ``init_op``, ``metric_ops``,
            ``pred_objects``, ``pred_objects_classes``,
            ``pred_objects_scores``, ``train_objects``, ``losses``,
            ``filename``, ``train_image``.
        checkpoint (dict): Checkpoint-related data.
            Expects the following keys: ``global_step``, ``file``.
    """
    # Output of the detector, per batch.
    output_per_batch = {
        "bboxes": [],  # Bounding boxes detected.
        "classes": [],  # Class associated to each bounding box.
        "scores": [],  # Score for each detection.
        "gt_bboxes": [],  # Ground-truth bounding boxes for the batch.
        "gt_classes": [],  # Ground-truth classes for each bounding box.
    }

    with tf.Session() as sess:
        sess.run(ops["init_op"])
        saver.restore(sess, checkpoint["file"])

        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(sess=sess, coord=coord)

        total_evaluated = 0
        start_time = time.time()

        try:
            track_start = start_time
            track_count = 0
            while not coord.should_stop():
                fetches = {
                    "metric_ops": ops["metric_ops"],
                    "bboxes": ops["pred_objects"],
                    "classes": ops["pred_objects_classes"],
                    "scores": ops["pred_objects_scores"],
                    "gt_bboxes": ops["train_objects"],
                    "losses": ops["losses"],
                    "filename": ops["filename"],
                }
                if image_vis is not None:
                    fetches["prediction_dict"] = ops["prediction_dict"]
                    fetches["train_image"] = ops["train_image"]

                batch_fetched = sess.run(fetches)
                output_per_batch["bboxes"].append(batch_fetched.get("bboxes"))
                output_per_batch["classes"].append(batch_fetched["classes"])
                output_per_batch["scores"].append(batch_fetched["scores"])

                batch_gt_objects = batch_fetched["gt_bboxes"]
                output_per_batch["gt_bboxes"].append(batch_gt_objects[:, :4])
                batch_gt_classes = batch_gt_objects[:, 4]
                output_per_batch["gt_classes"].append(batch_gt_classes)

                val_losses = batch_fetched["losses"]

                if image_vis is not None:
                    filename = batch_fetched["filename"].decode("utf-8")
                    visualize_file = False
                    for gt_class in batch_gt_classes:
                        cls_files = files_to_visualize.get(gt_class, set())
                        if len(cls_files) < files_per_class:
                            files_to_visualize.setdefault(gt_class,
                                                          set()).add(filename)
                            visualize_file = True
                            break
                        elif filename in cls_files:
                            visualize_file = True
                            break

                    if visualize_file:
                        image_summaries = image_vis_summaries(
                            batch_fetched["prediction_dict"],
                            config=config.model,
                            extra_tag=filename,
                            image_visualization_mode=image_vis,
                            image=batch_fetched["train_image"],
                            gt_bboxes=batch_fetched["gt_bboxes"],
                        )
                        for image_summary in image_summaries:
                            writer.add_summary(image_summary,
                                               checkpoint["global_step"])

                total_evaluated += 1
                track_count += 1

                track_end = time.time()
                if track_end - track_start > 20.0:
                    click.echo(
                        "{} processed in {:.2f}s (global {:.2f} images/s, "
                        "period {:.2f} images/s)".format(
                            total_evaluated,
                            track_end - start_time,
                            total_evaluated / (track_end - start_time),
                            track_count / (track_end - track_start),
                        ))
                    track_count = 0
                    track_start = track_end

        except tf.errors.OutOfRangeError:

            # Save final evaluation stats into summary under the checkpoint's
            # global step.
            ap_per_class, ar_per_class = calculate_metrics(
                output_per_batch, config.model.network.num_classes)

            map_at_50 = np.mean(ap_per_class[:, 0])
            map_at_75 = np.mean(ap_per_class[:, 5])
            map_at_range = np.mean(ap_per_class)
            mar_at_range = np.mean(ar_per_class)

            tf.logging.info("Finished evaluation at step {}.".format(
                checkpoint["global_step"]))
            tf.logging.info("Evaluated {} images.".format(total_evaluated))

            tf.logging.info(
                "Average Precision (AP) @ [0.50] = {:.3f}".format(map_at_50))
            tf.logging.info(
                "Average Precision (AP) @ [0.75] = {:.3f}".format(map_at_75))
            tf.logging.info(
                "Average Precision (AP) @ [0.50:0.95] = {:.3f}".format(
                    map_at_range))
            tf.logging.info(
                "Average Recall (AR) @ [0.50:0.95] = {:.3f}".format(
                    mar_at_range))

            for idx, val in enumerate(ap_per_class[:, 0]):
                class_label = ("{} ({})".format(class_labels[idx], idx)
                               if class_labels else idx)
                tf.logging.debug(
                    "Average Precision (AP) @ [0.50] for {} = {:.3f}".format(
                        class_label, val))

            summary = [
                tf.Summary.Value(tag="{}/[email protected]".format(metrics_scope),
                                 simple_value=map_at_50),
                tf.Summary.Value(tag="{}/[email protected]".format(metrics_scope),
                                 simple_value=map_at_75),
                tf.Summary.Value(
                    tag="{}/AP@[0.50:0.95]".format(metrics_scope),
                    simple_value=map_at_range,
                ),
                tf.Summary.Value(
                    tag="{}/AR@[0.50:0.95]".format(metrics_scope),
                    simple_value=mar_at_range,
                ),
                tf.Summary.Value(
                    tag="{}/total_evaluated".format(metrics_scope),
                    simple_value=total_evaluated,
                ),
                tf.Summary.Value(
                    tag="{}/evaluation_time".format(metrics_scope),
                    simple_value=time.time() - start_time,
                ),
            ]

            for loss_name, loss_value in val_losses.items():
                tf.logging.debug("{} loss = {:.4f}".format(
                    loss_name, loss_value))
                summary.append(
                    tf.Summary.Value(tag=loss_name, simple_value=loss_value))

            writer.add_summary(tf.Summary(value=summary),
                               checkpoint["global_step"])

        finally:
            coord.request_stop()

        # Wait for all threads to stop.
        coord.join(threads)
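
The AP/AR aggregation in the except block above treats the output of `calculate_metrics` as a per-class matrix whose columns are the COCO-style IoU thresholds 0.50:0.05:0.95, so column 0 is AP at IoU 0.50 and column 5 is AP at IoU 0.75. A standalone sketch of that reduction, using a made-up three-class matrix:

import numpy as np

# Hypothetical AP matrix: rows are classes, columns are the 10 IoU thresholds
# 0.50, 0.55, ..., 0.95, matching the indexing used in evaluate_once above.
ap_per_class = np.array([
    [0.81, 0.78, 0.74, 0.70, 0.65, 0.58, 0.50, 0.41, 0.30, 0.15],
    [0.92, 0.90, 0.88, 0.85, 0.80, 0.73, 0.64, 0.52, 0.38, 0.20],
    [0.70, 0.66, 0.62, 0.57, 0.51, 0.44, 0.36, 0.27, 0.18, 0.08],
])

map_at_50 = np.mean(ap_per_class[:, 0])   # mAP at IoU 0.50
map_at_75 = np.mean(ap_per_class[:, 5])   # mAP at IoU 0.75
map_at_range = np.mean(ap_per_class)      # mAP over IoU 0.50:0.95

print(map_at_50, map_at_75, map_at_range)
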
Code Example #4
def evaluate_once(config, writer, saver, ops, checkpoint,
                  metrics_scope='metrics', image_vis=None,
                  files_per_class=None, files_to_visualize=None):
    """Run the evaluation once.

    Create a new session with the previously-built graph, run it through the
    dataset, calculate the evaluation metrics and write the corresponding
    summaries.

    Args:
        config: Config object for the model.
        writer: Summary writer.
        saver: Saver object to restore checkpoint parameters.
        ops (dict): All the operations needed to successfully run the model.
            Expects the following keys: ``init_op``, ``metric_ops``,
            ``pred_objects``, ``pred_objects_classes``,
            ``pred_objects_scores``, ``train_objects``, ``losses``,
            ``train_image``.
        checkpoint (dict): Checkpoint-related data.
            Expects the following keys: ``global_step``, ``file``.
    """
    # Output of the detector, per batch.
    output_per_batch = {
        'bboxes': [],  # Bounding boxes detected.
        'classes': [],  # Class associated to each bounding box.
        'scores': [],  # Score for each detection.
        'gt_bboxes': [],  # Ground-truth bounding boxes for the batch.
        'gt_classes': [],  # Ground-truth classes for each bounding box.
    }

    with tf.Session() as sess:
        sess.run(ops['init_op'])
        saver.restore(sess, checkpoint['file'])

        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(sess=sess, coord=coord)

        try:
            while not coord.should_stop():
                fetches = {
                    'metric_ops': ops['metric_ops'],
                    'bboxes': ops['pred_objects'],
                    'classes': ops['pred_objects_classes'],
                    'scores': ops['pred_objects_scores'],
                    'gt_bboxes': ops['train_objects'],
                    'losses': ops['losses'],
                }
                if image_vis is not None:
                    fetches['prediction_dict'] = ops['prediction_dict']
                    fetches['filename'] = ops['filename']
                    fetches['train_image'] = ops['train_image']

                batch_fetched = sess.run(fetches)
                output_per_batch['bboxes'].append(batch_fetched.get('bboxes'))
                output_per_batch['classes'].append(batch_fetched['classes'])
                output_per_batch['scores'].append(batch_fetched['scores'])

                batch_gt_objects = batch_fetched['gt_bboxes']
                output_per_batch['gt_bboxes'].append(batch_gt_objects[:, :4])
                batch_gt_classes = batch_gt_objects[:, 4]
                output_per_batch['gt_classes'].append(batch_gt_classes)

                # Losses come from the same fetched batch as the predictions;
                # a separate sess.run would dequeue (and skip) another batch.
                val_losses = batch_fetched['losses']

                if image_vis is not None:
                    filename = batch_fetched['filename'].decode('utf-8')
                    visualize_file = False
                    for gt_class in batch_gt_classes:
                        cls_files = files_to_visualize.get(
                            gt_class, set()
                        )
                        if len(cls_files) < files_per_class:
                            files_to_visualize.setdefault(
                                gt_class, set()
                            ).add(filename)
                            visualize_file = True
                            break
                        elif filename in cls_files:
                            visualize_file = True
                            break

                    if visualize_file:
                        image_summaries = image_vis_summaries(
                            batch_fetched['prediction_dict'],
                            with_rcnn=config.model.network.with_rcnn,
                            extra_tag=filename,
                            image_visualization_mode=image_vis,
                            image=batch_fetched['train_image'],
                            gt_bboxes=batch_fetched['gt_bboxes']
                        )
                        for image_summary in image_summaries:
                            writer.add_summary(
                                image_summary, checkpoint['global_step']
                            )

        except tf.errors.OutOfRangeError:

            # Save final evaluation stats into summary under the checkpoint's
            # global step.
            map_0_5, per_class_0_5 = calculate_map(
                output_per_batch, config.model.network.num_classes, 0.5
            )

            tf.logging.info('Finished evaluation at step {}.'.format(
                checkpoint['global_step']))
            tf.logging.info('[email protected] = {:.2f}'.format(map_0_5))

            # TODO: Find a way to generate these summaries automatically, or
            # less manually.
            summary = [
                tf.Summary.Value(
                    tag='{}/[email protected]'.format(metrics_scope),
                    simple_value=map_0_5
                ),
            ]

            for idx, val in enumerate(per_class_0_5):
                tf.logging.debug('[email protected] for {} = {:.2f}'.format(idx, val))
                summary.append(tf.Summary.Value(
                    tag='{}/[email protected]/{}'.format(metrics_scope, idx),
                    simple_value=val
                ))

            for loss_name, loss_value in val_losses.items():
                tf.logging.debug('{} loss = {:.4f}'.format(
                    loss_name, loss_value))
                summary.append(tf.Summary.Value(
                    tag=loss_name,
                    simple_value=loss_value
                ))

            total_bboxes_per_batch = [
                len(bboxes) for bboxes in output_per_batch['bboxes']
            ]

            summary.append(tf.Summary.Value(
                tag='{}/avg_bboxes'.format(metrics_scope),
                simple_value=np.mean(total_bboxes_per_batch)
            ))

            writer.add_summary(
                tf.Summary(value=summary), checkpoint['global_step']
            )

        finally:
            coord.request_stop()

        # Wait for all threads to stop.
        coord.join(threads)
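
The per-class visualization cap that appears in examples #3 through #5 can be read in isolation: a file gets image summaries if any of its ground-truth classes still has fewer than `files_per_class` selected files, or if it was already selected for one of them. A standalone sketch of that rule (the helper name is made up; the logic mirrors the inline loop above):

def should_visualize(filename, gt_classes, files_to_visualize, files_per_class):
    """Decide whether to emit image summaries for this file, updating the
    per-class registry in place (same behavior as the inline loop above)."""
    for gt_class in gt_classes:
        cls_files = files_to_visualize.setdefault(gt_class, set())
        if len(cls_files) < files_per_class:
            cls_files.add(filename)
            return True
        if filename in cls_files:
            return True
    return False


# Example: keep at most two files per class.
registry = {}
print(should_visualize('a.jpg', [1, 2], registry, files_per_class=2))  # True
print(should_visualize('b.jpg', [1], registry, files_per_class=2))     # True
print(should_visualize('c.jpg', [1], registry, files_per_class=2))     # False
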
Code Example #5
def evaluate_once(config, writer, saver, ops, checkpoint,
                  metrics_scope='metrics', image_vis=None,
                  files_per_class=None, files_to_visualize=None,
                  iou_threshold=0.5, min_probability=0.5):
    """Run the evaluation once.

    Create a new session with the previously-built graph, run it through the
    dataset, calculate the evaluation metrics and write the corresponding
    summaries.

    Args:
        config: Config object for the model.
        writer: Summary writer.
        saver: Saver object to restore checkpoint parameters.
        ops (dict): All the operations needed to successfully run the model.
            Expects the following keys: ``init_op``, ``metric_ops``,
            ``pred_objects``, ``pred_objects_classes``,
            ``pred_objects_scores``, ``train_objects``, ``losses``,
            ``filename``, ``train_image``.
        checkpoint (dict): Checkpoint-related data.
            Expects the following keys: ``global_step``, ``file``.
    """
    # Output of the detector, per batch.
    output_per_batch = {
        'bboxes': [],  # Bounding boxes detected.
        'classes': [],  # Class associated to each bounding box.
        'scores': [],  # Score for each detection.
        'gt_bboxes': [],  # Ground-truth bounding boxes for the batch.
        'gt_classes': [],  # Ground-truth classes for each bounding box.
    }

    with tf.Session() as sess:
        sess.run(ops['init_op'])
        saver.restore(sess, checkpoint['file'])

        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(sess=sess, coord=coord)

        total_evaluated = 0
        start_time = time.time()

        try:
            track_start = start_time
            track_count = 0
            period_detected_count = 0
            period_detected_max = 0
            while not coord.should_stop():
                fetches = {
                    'metric_ops': ops['metric_ops'],
                    'bboxes': ops['pred_objects'],
                    'classes': ops['pred_objects_classes'],
                    'scores': ops['pred_objects_scores'],
                    'gt_bboxes': ops['train_objects'],
                    'losses': ops['losses'],
                    'filename': ops['filename'],
                }
                if image_vis is not None:
                    fetches['prediction_dict'] = ops['prediction_dict']
                    fetches['train_image'] = ops['train_image']

                batch_fetched = sess.run(fetches)
                output_per_batch['bboxes'].append(batch_fetched.get('bboxes'))
                output_per_batch['classes'].append(batch_fetched['classes'])
                output_per_batch['scores'].append(batch_fetched['scores'])

                batch_gt_objects = batch_fetched['gt_bboxes']
                output_per_batch['gt_bboxes'].append(batch_gt_objects[:, :4])
                batch_gt_classes = batch_gt_objects[:, 4]
                output_per_batch['gt_classes'].append(batch_gt_classes)

                val_losses = batch_fetched['losses']

                num_detected = len(batch_fetched['scores'])
                period_detected_count += num_detected
                period_detected_max = max(period_detected_max, num_detected)
                if period_detected_max == num_detected:
                    period_max_img = batch_fetched['filename']

                if image_vis is not None:
                    filename = batch_fetched['filename'].decode('utf-8')
                    visualize_file = False
                    for gt_class in batch_gt_classes:
                        cls_files = files_to_visualize.get(
                            gt_class, set()
                        )
                        if len(cls_files) < files_per_class:
                            files_to_visualize.setdefault(
                                gt_class, set()
                            ).add(filename)
                            visualize_file = True
                            break
                        elif filename in cls_files:
                            visualize_file = True
                            break

                    if visualize_file:
                        image_summaries = image_vis_summaries(
                            batch_fetched['prediction_dict'],
                            with_rcnn=config.model.network.with_rcnn,
                            extra_tag=filename,
                            image_visualization_mode=image_vis,
                            image=batch_fetched['train_image'],
                            gt_bboxes=batch_fetched['gt_bboxes']
                        )
                        for image_summary in image_summaries:
                            writer.add_summary(
                                image_summary, checkpoint['global_step']
                            )

                total_evaluated += 1
                track_count += 1

                track_end = time.time()
                if track_end - track_start > 20.:
                    click.echo(
                        '{} processed in {:.2f}s (global {:.2f} images/s, '
                        'period {:.2f} images/s, avg dets {:.2f}, '
                        'max dets {} on {})'.format(
                            total_evaluated, track_end - start_time,
                            total_evaluated / (track_end - start_time),
                            track_count / (track_end - track_start),
                            period_detected_count / track_count,
                            period_detected_max, period_max_img
                        ))
                    track_count = 0
                    track_start = track_end
                    period_detected_count = 0
                    period_detected_max = 0

        except tf.errors.OutOfRangeError:

            # Save final evaluation stats into summary under the checkpoint's
            # global step.
            map_at_iou, per_class_at_iou = calculate_map(
                output_per_batch, config.model.network.num_classes,
                iou_threshold
            )

            tf.logging.info('Finished evaluation at step {}.'.format(
                checkpoint['global_step']))
            tf.logging.info('Evaluated {} images.'.format(total_evaluated))
            tf.logging.info('mAP@{}@{} = {:.2f}'.format(
                iou_threshold, min_probability, map_at_iou))

            # TODO: Find a way to generate these summaries automatically, or
            # less manually.
            summary = [
                tf.Summary.Value(
                    tag='{}/mAP@{}@{}'.format(
                        metrics_scope, iou_threshold, min_probability
                    ),
                    simple_value=map_at_iou
                ),
                tf.Summary.Value(
                    tag='{}/total_evaluated'.format(metrics_scope),
                    simple_value=total_evaluated
                ),
                tf.Summary.Value(
                    tag='{}/evaluation_time'.format(metrics_scope),
                    simple_value=time.time() - start_time
                ),
            ]

            for idx, val in enumerate(per_class_at_iou):
                tf.logging.debug('AP@{}@{} for {} = {:.2f}'.format(
                    iou_threshold, min_probability, idx, val))
                summary.append(tf.Summary.Value(
                    tag='{}/AP@{}@{}/{}'.format(
                        metrics_scope, iou_threshold, min_probability, idx
                    ),
                    simple_value=val
                ))

            for loss_name, loss_value in val_losses.items():
                tf.logging.debug('{} loss = {:.4f}'.format(
                    loss_name, loss_value))
                summary.append(tf.Summary.Value(
                    tag=loss_name,
                    simple_value=loss_value
                ))

            total_bboxes_per_batch = [
                len(bboxes) for bboxes in output_per_batch['bboxes']
            ]

            summary.append(tf.Summary.Value(
                tag='{}/avg_bboxes'.format(metrics_scope),
                simple_value=np.mean(total_bboxes_per_batch)
            ))

            writer.add_summary(
                tf.Summary(value=summary), checkpoint['global_step']
            )

        finally:
            coord.request_stop()

        # Wait for all threads to stop.
        coord.join(threads)
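
All three `evaluate_once` variants end by assembling `tf.Summary.Value` protos by hand and writing them under the checkpoint's global step. The same TensorFlow 1.x pattern in isolation (the log directory, tags, values, and step are placeholders):

import tensorflow as tf  # TensorFlow 1.x

# Hand-built scalar summary, written under a specific global step, just as the
# except blocks above do with their metric values.
summary = tf.Summary(value=[
    tf.Summary.Value(tag='metrics/mAP@0.5', simple_value=0.42),
    tf.Summary.Value(tag='metrics/total_evaluated', simple_value=500),
])

writer = tf.summary.FileWriter('/tmp/eval_logs')  # placeholder log directory
writer.add_summary(summary, global_step=12000)
writer.flush()
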