def main(unused_argv):
    tf.logging.set_verbosity(tf.logging.INFO)
    tf.logging.info('Prepare to export model to: %s', FLAGS.export_path)

    with tf.Graph().as_default():
        image, image_size, resized_image_size = _create_input_tensors()

        model_options = common.ModelOptions(
            outputs_to_num_classes={common.OUTPUT_TYPE: FLAGS.num_classes},
            crop_size=FLAGS.crop_size,
            atrous_rates=FLAGS.atrous_rates,
            output_stride=FLAGS.output_stride)

        if tuple(FLAGS.inference_scales) == (1.0, ):
            tf.logging.info('Exported model performs single-scale inference.')
            predictions = model.predict_labels(
                image,
                model_options=model_options,
                image_pyramid=FLAGS.image_pyramid)
        else:
            tf.logging.info('Exported model performs multi-scale inference.')
            predictions = model.predict_labels_multi_scale(
                image,
                model_options=model_options,
                eval_scales=FLAGS.inference_scales,
                add_flipped_images=FLAGS.add_flipped_images)

        # Crop the valid regions from the predictions.
        semantic_predictions = tf.slice(
            predictions[common.OUTPUT_TYPE], [0, 0, 0],
            [1, resized_image_size[0], resized_image_size[1]])

        # Resize back the prediction to the original image size.
        def _resize_label(label, label_size):
            # Expand dimension of label to [1, height, width, 1] for resize operation.
            label = tf.expand_dims(label, 3)
            resized_label = tf.image.resize_images(
                label,
                label_size,
                method=tf.image.ResizeMethod.NEAREST_NEIGHBOR,
                align_corners=True)
            return tf.squeeze(resized_label, 3)

        semantic_predictions = _resize_label(semantic_predictions, image_size)
        semantic_predictions = tf.identity(semantic_predictions,
                                           name=_OUTPUT_NAME)

        saver = tf.train.Saver(tf.model_variables())

        tf.gfile.MakeDirs(os.path.dirname(FLAGS.export_path))
        freeze_graph.freeze_graph_with_def_protos(
            tf.get_default_graph().as_graph_def(add_shapes=True),
            saver.as_saver_def(),
            FLAGS.checkpoint_path,
            _OUTPUT_NAME,
            restore_op_name=None,
            filename_tensor_name=None,
            output_graph=FLAGS.export_path,
            clear_devices=True,
            initializer_nodes=None)
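
The frozen graph written above can be loaded back for inference with tf.import_graph_def. A minimal sketch, assuming the usual DeepLab node names ('ImageTensor:0' for the placeholder built by _create_input_tensors and 'SemanticPredictions:0' for _OUTPUT_NAME; both constants live outside this snippet, so treat them and the path as assumptions):

import numpy as np
import tensorflow as tf

graph_def = tf.GraphDef()
# Placeholder path; point this at FLAGS.export_path from the script above.
with tf.gfile.GFile('/tmp/frozen_inference_graph.pb', 'rb') as f:
    graph_def.ParseFromString(f.read())

with tf.Graph().as_default() as graph:
    tf.import_graph_def(graph_def, name='')
    with tf.Session(graph=graph) as sess:
        # Dummy uint8 batch; a real caller feeds a decoded image here.
        image = np.zeros((1, 513, 513, 3), dtype=np.uint8)
        labels = sess.run('SemanticPredictions:0',
                          feed_dict={'ImageTensor:0': image})
        print(labels.shape)  # (1, height, width) int32 label map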
Example #2
def main(unused_argv):
    tf.logging.set_verbosity(tf.logging.INFO)
    # Get dataset-dependent information.
    dataset = segmentation_dataset.get_dataset(FLAGS.dataset,
                                               FLAGS.vis_split,
                                               dataset_dir=FLAGS.dataset_dir)
    train_id_to_eval_id = None
    if dataset.name == segmentation_dataset.get_cityscapes_dataset_name():
        tf.logging.info('Cityscapes requires converting train_id to eval_id.')
        train_id_to_eval_id = _CITYSCAPES_TRAIN_ID_TO_EVAL_ID

    # Prepare for visualization.
    tf.gfile.MakeDirs(FLAGS.vis_logdir)
    save_dir = os.path.join(FLAGS.vis_logdir, _SEMANTIC_PREDICTION_SAVE_FOLDER)
    tf.gfile.MakeDirs(save_dir)
    raw_save_dir = os.path.join(FLAGS.vis_logdir,
                                _RAW_SEMANTIC_PREDICTION_SAVE_FOLDER)
    tf.gfile.MakeDirs(raw_save_dir)

    tf.logging.info('Visualizing on %s set', FLAGS.vis_split)

    g = tf.Graph()
    with g.as_default():
        samples = input_generator.get(dataset,
                                      FLAGS.vis_crop_size,
                                      FLAGS.vis_batch_size,
                                      min_resize_value=FLAGS.min_resize_value,
                                      max_resize_value=FLAGS.max_resize_value,
                                      resize_factor=FLAGS.resize_factor,
                                      dataset_split=FLAGS.vis_split,
                                      is_training=False,
                                      model_variant=FLAGS.model_variant)

        model_options = common.ModelOptions(
            outputs_to_num_classes={common.OUTPUT_TYPE: dataset.num_classes},
            crop_size=FLAGS.vis_crop_size,
            atrous_rates=FLAGS.atrous_rates,
            output_stride=FLAGS.output_stride)

        if tuple(FLAGS.eval_scales) == (1.0, ):
            tf.logging.info('Performing single-scale test.')
            predictions = model.predict_labels(
                samples[common.IMAGE],
                model_options=model_options,
                image_pyramid=FLAGS.image_pyramid)
        else:
            tf.logging.info('Performing multi-scale test.')
            predictions = model.predict_labels_multi_scale(
                samples[common.IMAGE],
                model_options=model_options,
                eval_scales=FLAGS.eval_scales,
                add_flipped_images=FLAGS.add_flipped_images)
        predictions = predictions[common.OUTPUT_TYPE]

        if FLAGS.min_resize_value and FLAGS.max_resize_value:
            # Only support batch_size = 1, since we assume the dimensions of the
            # original image after tf.squeeze are [height, width, 3].
            assert FLAGS.vis_batch_size == 1

            # Reverse the resizing and padding operations performed in preprocessing.
            # First, we slice the valid regions (i.e., remove padded region) and then
            # we resize the predictions back.
            original_image = tf.squeeze(samples[common.ORIGINAL_IMAGE])
            original_image_shape = tf.shape(original_image)
            predictions = tf.slice(
                predictions, [0, 0, 0],
                [1, original_image_shape[0], original_image_shape[1]])
            resized_shape = tf.to_int32([
                tf.squeeze(samples[common.HEIGHT]),
                tf.squeeze(samples[common.WIDTH])
            ])
            predictions = tf.squeeze(
                tf.image.resize_images(
                    tf.expand_dims(predictions, 3),
                    resized_shape,
                    method=tf.image.ResizeMethod.NEAREST_NEIGHBOR,
                    align_corners=True), 3)

        tf.train.get_or_create_global_step()
        saver = tf.train.Saver(slim.get_variables_to_restore())
        sv = tf.train.Supervisor(graph=g,
                                 logdir=FLAGS.vis_logdir,
                                 init_op=tf.global_variables_initializer(),
                                 summary_op=None,
                                 summary_writer=None,
                                 global_step=None,
                                 saver=saver)
        num_batches = int(
            math.ceil(dataset.num_samples / float(FLAGS.vis_batch_size)))
        last_checkpoint = None

        # Loop to visualize the results whenever a new checkpoint is created.
        num_iters = 0
        while (FLAGS.max_number_of_iterations <= 0
               or num_iters < FLAGS.max_number_of_iterations):
            num_iters += 1
            last_checkpoint = slim.evaluation.wait_for_new_checkpoint(
                FLAGS.checkpoint_dir, last_checkpoint)
            start = time.time()
            tf.logging.info('Starting visualization at ' +
                            time.strftime('%Y-%m-%d-%H:%M:%S', time.gmtime()))
            tf.logging.info('Visualizing with model %s', last_checkpoint)

            with sv.managed_session(FLAGS.master,
                                    start_standard_services=False) as sess:
                sv.start_queue_runners(sess)
                sv.saver.restore(sess, last_checkpoint)

                image_id_offset = 0
                for batch in range(num_batches):
                    tf.logging.info('Visualizing batch %d / %d', batch + 1,
                                    num_batches)
                    _process_batch(
                        sess=sess,
                        original_images=samples[common.ORIGINAL_IMAGE],
                        semantic_predictions=predictions,
                        image_names=samples[common.IMAGE_NAME],
                        image_heights=samples[common.HEIGHT],
                        image_widths=samples[common.WIDTH],
                        image_id_offset=image_id_offset,
                        save_dir=save_dir,
                        raw_save_dir=raw_save_dir,
                        train_id_to_eval_id=train_id_to_eval_id)
                    image_id_offset += FLAGS.vis_batch_size

            tf.logging.info('Finished visualization at ' +
                            time.strftime('%Y-%m-%d-%H:%M:%S', time.gmtime()))
            time_to_next_eval = start + FLAGS.eval_interval_secs - time.time()
            if time_to_next_eval > 0:
                time.sleep(time_to_next_eval)
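
_process_batch itself is not shown in these snippets. The sketch below reconstructs the essential shape of such a helper (fetch the batch, crop each prediction to its unpadded size, write one PNG per image); save_annotation and the '%06d_prediction' filename pattern are assumptions standing in for the project's real helpers:

import numpy as np
from deeplab.utils import save_annotation  # assumed helper module

_PREDICTION_FORMAT = '%06d_prediction'  # assumed filename pattern


def _process_batch(sess, original_images, semantic_predictions, image_names,
                   image_heights, image_widths, image_id_offset, save_dir,
                   raw_save_dir, train_id_to_eval_id=None):
    """Minimal sketch: evaluates one batch and saves predictions as PNGs."""
    # The full version also fetches original_images and image_names to save
    # the input image alongside each prediction.
    (semantic_predictions, image_heights, image_widths) = sess.run(
        [semantic_predictions, image_heights, image_widths])
    for i in range(semantic_predictions.shape[0]):
        image_height = np.squeeze(image_heights[i])
        image_width = np.squeeze(image_widths[i])
        # Crop away the padding added during preprocessing.
        crop_prediction = semantic_predictions[i, :image_height, :image_width]
        if train_id_to_eval_id is not None:
            # Map train ids back to the ids the benchmark server expects.
            crop_prediction = np.asarray(train_id_to_eval_id)[crop_prediction]
        # save_annotation (assumed signature) colorizes and writes the map.
        save_annotation.save_annotation(
            crop_prediction, save_dir,
            _PREDICTION_FORMAT % (image_id_offset + i), add_colormap=True)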
Example #3
def main(unused_argv):
    tf.logging.set_verbosity(tf.logging.INFO)
    tf.logging.info('Prepare to export model to: %s', FLAGS.export_path)

    with tf.Graph().as_default():
        image, image_size, resized_image_size = _create_input_tensors()

        model_options = common.ModelOptions(
            outputs_to_num_classes={common.OUTPUT_TYPE: FLAGS.num_classes},
            crop_size=FLAGS.crop_size,
            atrous_rates=FLAGS.atrous_rates,
            output_stride=FLAGS.output_stride)

        if tuple(FLAGS.inference_scales) == (1.0, ):
            tf.logging.info('Exported model performs single-scale inference.')
            predictions = model.predict_labels(
                image,
                model_options=model_options,
                image_pyramid=FLAGS.image_pyramid)
        else:
            tf.logging.info('Exported model performs multi-scale inference.')
            if FLAGS.quantize_delay_step >= 0:
                raise ValueError(
                    'Quantize mode is not supported with multi-scale test.')
            predictions = model.predict_labels_multi_scale(
                image,
                model_options=model_options,
                eval_scales=FLAGS.inference_scales,
                add_flipped_images=FLAGS.add_flipped_images)
        raw_predictions = tf.identity(
            tf.cast(predictions[common.OUTPUT_TYPE], tf.float32),
            _RAW_OUTPUT_NAME)
        raw_probabilities = tf.identity(
            predictions[common.OUTPUT_TYPE + model.PROB_SUFFIX],
            _RAW_OUTPUT_PROB_NAME)

        # Crop the valid regions from the predictions.
        semantic_predictions = raw_predictions[
            :, :resized_image_size[0], :resized_image_size[1]]
        semantic_probabilities = raw_probabilities[
            :, :resized_image_size[0], :resized_image_size[1]]

        # Resize back the prediction to the original image size.
        def _resize_label(label, label_size):
            # Expand dimension of label to [1, height, width, 1] for resize operation.
            label = tf.expand_dims(label, 3)
            resized_label = tf.image.resize_images(
                label,
                label_size,
                method=tf.image.ResizeMethod.NEAREST_NEIGHBOR,
                align_corners=True)
            return tf.cast(tf.squeeze(resized_label, 3), tf.int32)

        semantic_predictions = _resize_label(semantic_predictions, image_size)
        semantic_predictions = tf.identity(semantic_predictions,
                                           name=_OUTPUT_NAME)

        semantic_probabilities = tf.image.resize_bilinear(
            semantic_probabilities,
            image_size,
            align_corners=True,
            name=_OUTPUT_PROB_NAME)

        if FLAGS.quantize_delay_step >= 0:
            contrib_quantize.create_eval_graph()

        saver = tf.train.Saver(tf.all_variables())

        dirname = os.path.dirname(FLAGS.export_path)
        tf.gfile.MakeDirs(dirname)
        graph_def = tf.get_default_graph().as_graph_def(add_shapes=True)
        freeze_graph.freeze_graph_with_def_protos(
            graph_def,
            saver.as_saver_def(),
            FLAGS.checkpoint_path,
            _OUTPUT_NAME + ',' + _OUTPUT_PROB_NAME,
            restore_op_name=None,
            filename_tensor_name=None,
            output_graph=FLAGS.export_path,
            clear_devices=True,
            initializer_nodes=None)

        if FLAGS.save_inference_graph:
            tf.train.write_graph(graph_def, dirname, 'inference_graph.pbtxt')
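
_create_input_tensors is referenced by both export snippets but defined elsewhere. A simplified sketch of what it must return: a uint8 placeholder plus the original and pre-padding sizes that the cropping code above relies on. The real version runs the full DeepLab preprocessing, so the details here (including the 'ImageTensor' name) are assumptions:

def _create_input_tensors():
    """Sketch: input placeholder plus the two sizes the export code needs."""
    input_image = tf.placeholder(tf.uint8, [1, None, None, 3],
                                 name='ImageTensor')  # assumed input name
    image_size = tf.shape(input_image)[1:3]  # original [height, width]
    image = tf.cast(input_image, tf.float32)
    # Without min/max resize flags the image keeps its size before padding,
    # so the "resized" size equals the original size in this sketch.
    resized_image_size = tf.shape(image)[1:3]
    # Pad to the fixed crop size the network expects.
    image = tf.image.pad_to_bounding_box(
        image, 0, 0, FLAGS.crop_size[0], FLAGS.crop_size[1])
    return image, image_size, resized_image_size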
Example #4
def main(unused_argv):
  tf.logging.set_verbosity(tf.logging.INFO)

  # Get dataset-dependent information.
  dataset = data_generator.Dataset(
      dataset_name=FLAGS.dataset,
      split_name=FLAGS.vis_split,
      dataset_dir=FLAGS.dataset_dir,
      batch_size=FLAGS.vis_batch_size,
      crop_size=[int(sz) for sz in FLAGS.vis_crop_size],
      min_resize_value=FLAGS.min_resize_value,
      max_resize_value=FLAGS.max_resize_value,
      resize_factor=FLAGS.resize_factor,
      model_variant=FLAGS.model_variant,
      is_training=False,
      should_shuffle=False,
      should_repeat=False)

  train_id_to_eval_id = None
  if dataset.dataset_name == data_generator.get_cityscapes_dataset_name():
    tf.logging.info('Cityscapes requires converting train_id to eval_id.')
    train_id_to_eval_id = _CITYSCAPES_TRAIN_ID_TO_EVAL_ID

  # Prepare for visualization.
  tf.gfile.MakeDirs(FLAGS.vis_logdir)
  save_dir = os.path.join(FLAGS.vis_logdir, _SEMANTIC_PREDICTION_SAVE_FOLDER)
  tf.gfile.MakeDirs(save_dir)
  raw_save_dir = os.path.join(
      FLAGS.vis_logdir, _RAW_SEMANTIC_PREDICTION_SAVE_FOLDER)
  tf.gfile.MakeDirs(raw_save_dir)

  tf.logging.info('Visualizing on %s set', FLAGS.vis_split)

  with tf.Graph().as_default():
    samples = dataset.get_one_shot_iterator().get_next()

    model_options = common.ModelOptions(
        outputs_to_num_classes={common.OUTPUT_TYPE: dataset.num_of_classes},
        crop_size=[int(sz) for sz in FLAGS.vis_crop_size],
        atrous_rates=FLAGS.atrous_rates,
        output_stride=FLAGS.output_stride)

    if tuple(FLAGS.eval_scales) == (1.0,):
      tf.logging.info('Performing single-scale test.')
      predictions = model.predict_labels(
          samples[common.IMAGE],
          model_options=model_options,
          image_pyramid=FLAGS.image_pyramid)
    else:
      tf.logging.info('Performing multi-scale test.')
      if FLAGS.quantize_delay_step >= 0:
        raise ValueError(
            'Quantize mode is not supported with multi-scale test.')
      predictions = model.predict_labels_multi_scale(
          samples[common.IMAGE],
          model_options=model_options,
          eval_scales=FLAGS.eval_scales,
          add_flipped_images=FLAGS.add_flipped_images)
    predictions = predictions[common.OUTPUT_TYPE]

    if FLAGS.min_resize_value and FLAGS.max_resize_value:
      # Only support batch_size = 1, since we assume the dimensions of the
      # original image after tf.squeeze are [height, width, 3].
      assert FLAGS.vis_batch_size == 1

      # Reverse the resizing and padding operations performed in preprocessing.
      # First, we slice the valid regions (i.e., remove padded region) and then
      # we resize the predictions back.
      original_image = tf.squeeze(samples[common.ORIGINAL_IMAGE])
      original_image_shape = tf.shape(original_image)
      predictions = tf.slice(
          predictions,
          [0, 0, 0],
          [1, original_image_shape[0], original_image_shape[1]])
      resized_shape = tf.to_int32([tf.squeeze(samples[common.HEIGHT]),
                                   tf.squeeze(samples[common.WIDTH])])
      predictions = tf.squeeze(
          tf.image.resize_images(tf.expand_dims(predictions, 3),
                                 resized_shape,
                                 method=tf.image.ResizeMethod.NEAREST_NEIGHBOR,
                                 align_corners=True), 3)

    tf.train.get_or_create_global_step()
    if FLAGS.quantize_delay_step >= 0:
      contrib_quantize.create_eval_graph()

    num_iteration = 0
    max_num_iteration = FLAGS.max_number_of_iterations

    checkpoints_iterator = contrib_training.checkpoints_iterator(
        FLAGS.checkpoint_dir, min_interval_secs=FLAGS.eval_interval_secs)
    for checkpoint_path in checkpoints_iterator:
      num_iteration += 1
      tf.logging.info(
          'Starting visualization at ' + time.strftime('%Y-%m-%d-%H:%M:%S',
                                                       time.gmtime()))
      tf.logging.info('Visualizing with model %s', checkpoint_path)

      scaffold = tf.train.Scaffold(init_op=tf.global_variables_initializer())
      session_creator = tf.train.ChiefSessionCreator(
          scaffold=scaffold,
          master=FLAGS.master,
          checkpoint_filename_with_path=checkpoint_path)
      with tf.train.MonitoredSession(
          session_creator=session_creator, hooks=None) as sess:
        batch = 0
        image_id_offset = 0

        while not sess.should_stop():
          tf.logging.info('Visualizing batch %d', batch + 1)
          _process_batch(sess=sess,
                         original_images=samples[common.ORIGINAL_IMAGE],
                         semantic_predictions=predictions,
                         image_names=samples[common.IMAGE_NAME],
                         image_heights=samples[common.HEIGHT],
                         image_widths=samples[common.WIDTH],
                         image_id_offset=image_id_offset,
                         save_dir=save_dir,
                         raw_save_dir=raw_save_dir,
                         train_id_to_eval_id=train_id_to_eval_id)
          image_id_offset += FLAGS.vis_batch_size
          batch += 1

      tf.logging.info(
          'Finished visualization at ' + time.strftime('%Y-%m-%d-%H:%M:%S',
                                                       time.gmtime()))
      if max_num_iteration > 0 and num_iteration >= max_num_iteration:
        break
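
contrib_training.checkpoints_iterator, used above, blocks until a new checkpoint file appears and yields its path; besides max_number_of_iterations, a timeout is the usual way to end such a loop once training has finished. A small usage sketch (directory and timing values are placeholders):

from tensorflow.contrib import training as contrib_training

for checkpoint_path in contrib_training.checkpoints_iterator(
        '/tmp/train_logdir',      # watched directory (placeholder)
        min_interval_secs=300,    # poll for new checkpoints every 5 minutes
        timeout=3600):            # stop iterating after an idle hour
    print('Found new checkpoint:', checkpoint_path)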
Example #5
def main(unused_argv):
    tf.logging.set_verbosity(tf.logging.INFO)

    dataset = data_generator.Dataset(
        dataset_name=FLAGS.dataset,
        split_name=FLAGS.eval_split,
        dataset_dir=FLAGS.dataset_dir,
        batch_size=FLAGS.eval_batch_size,
        crop_size=[int(sz) for sz in FLAGS.eval_crop_size],
        min_resize_value=FLAGS.min_resize_value,
        max_resize_value=FLAGS.max_resize_value,
        resize_factor=FLAGS.resize_factor,
        model_variant=FLAGS.model_variant,
        num_readers=2,
        is_training=False,
        should_shuffle=False,
        should_repeat=False)

    tf.gfile.MakeDirs(FLAGS.eval_logdir)
    tf.logging.info('Evaluating on %s set', FLAGS.eval_split)

    gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=1.0)

    session_config = tf.ConfigProto(allow_soft_placement=True,
                                    log_device_placement=False,
                                    gpu_options=gpu_options)

    #session_config.gpu_options.allow_growth = True

    with tf.Graph().as_default():
        samples = dataset.get_one_shot_iterator().get_next()
        #print(samples[common.IMAGE_NAME])

        model_options = common.ModelOptions(
            outputs_to_num_classes={
                common.OUTPUT_TYPE: dataset.num_of_classes
            },
            crop_size=[int(sz) for sz in FLAGS.eval_crop_size],
            atrous_rates=FLAGS.atrous_rates,
            output_stride=FLAGS.output_stride)

        # Set shape in order for tf.contrib.tfprof.model_analyzer to work properly.

        samples[common.IMAGE].set_shape([
            FLAGS.eval_batch_size,
            int(FLAGS.eval_crop_size[0]),
            int(FLAGS.eval_crop_size[1]), 3
        ])
        if tuple(FLAGS.eval_scales) == (1.0, ):
            tf.logging.info('Performing single-scale test.')
            predictions, logits = model.predict_labels(
                samples[common.IMAGE],
                model_options,
                image_pyramid=FLAGS.image_pyramid,
                skips=FLAGS.skips)
        else:
            tf.logging.info('Performing multi-scale test.')
            if FLAGS.quantize_delay_step >= 0:
                raise ValueError(
                    'Quantize mode is not supported with multi-scale test.')

            predictions = model.predict_labels_multi_scale(
                samples[common.IMAGE],
                model_options=model_options,
                skips=FLAGS.skips,
                eval_scales=FLAGS.eval_scales,
                add_flipped_images=FLAGS.add_flipped_images)
        predictions = predictions[common.OUTPUT_TYPE]
        predictions = tf.reshape(predictions, shape=[-1])
        labels = tf.reshape(samples[common.LABEL], shape=[-1])
        weights = tf.to_float(tf.not_equal(labels, dataset.ignore_label))

        # Set ignore_label regions to label 0, because metrics.mean_iou requires
        # range of labels = [0, dataset.num_classes). Note the ignore_label regions
        # are not evaluated since the corresponding regions contain weights = 0.
        labels = tf.where(tf.equal(labels, dataset.ignore_label),
                          tf.zeros_like(labels), labels)

        predictions_tag = 'miou'
        for eval_scale in FLAGS.eval_scales:
            predictions_tag += '_' + str(eval_scale)
        if FLAGS.add_flipped_images:
            predictions_tag += '_flipped'

        # Define the evaluation metric.

        metric_map = {}

        # To avoid a "predictions out of bound" error, keep only pixels whose
        # label is within [0, num_of_classes). The weights are gathered with
        # the same indices so that all metric inputs keep matching shapes.
        indices = tf.squeeze(
            tf.where(tf.less_equal(labels, dataset.num_of_classes - 1)), 1)
        labels_ind = tf.cast(tf.gather(labels, indices), tf.int32)
        predictions_ind = tf.gather(predictions, indices)
        weights_ind = tf.gather(weights, indices)

        miou, update_miou = tf.metrics.mean_iou(labels_ind,
                                                predictions_ind,
                                                dataset.num_of_classes,
                                                weights=weights_ind,
                                                name="mean_iou")
        tf.summary.scalar(predictions_tag, miou)

        # Define the evaluation metric IOU for individual classes
        iou_v, update_op = my_metrics.iou(labels_ind,
                                          predictions_ind,
                                          dataset.num_of_classes,
                                          weights=weights_ind)
        for index in range(0, dataset.num_of_classes):
            metric_map['class_' + str(index) + '_iou'] = (iou_v[index],
                                                          update_op[index])
            tf.summary.scalar('class_' + str(index) + '_iou', iou_v[index])

        # Confusion matrix save hook: writes the confusion matrix to
        # TensorBoard at the end of the eval loop.
        confusionMatrixSaveHook = confusion_matrix.SaverHook(
            labels=['BG', 'water', 'ice', 'snow', 'clutter'],
            confusion_matrix_tensor_name='mean_iou/total_confusion_matrix',
            summary_writer=tf.summary.FileWriterCache.get(
                str(FLAGS.eval_logdir)))

        summary_op = tf.summary.merge_all()

        summary_hook = tf.contrib.training.SummaryAtEndHook(
            log_dir=FLAGS.eval_logdir, summary_op=summary_op)
        hooks = [summary_hook, confusionMatrixSaveHook]

        num_eval_iters = None
        if FLAGS.max_number_of_evaluations > 0:
            num_eval_iters = FLAGS.max_number_of_evaluations

        if FLAGS.quantize_delay_step >= 0:
            tf.contrib.quantize.create_eval_graph()

        tf.contrib.training.evaluate_repeatedly(
            master=FLAGS.master,
            checkpoint_dir=FLAGS.checkpoint_dir,
            eval_ops=[update_miou, update_op],
            max_number_of_evaluations=num_eval_iters,
            hooks=hooks,
            eval_interval_secs=FLAGS.eval_interval_secs)
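
my_metrics.iou above is a custom module that is not shown. A plausible stand-in that returns per-class streaming IoU values and update ops (both indexable [num_classes] tensors, matching how metric_map is filled above) can be built from an accumulated confusion matrix; this is a sketch under that assumption, not the project's actual implementation:

import tensorflow as tf


def iou(labels, predictions, num_classes, weights=None):
    """Streaming per-class IoU: returns (values, update_op), each [num_classes]."""
    # Accumulate a confusion matrix across batches in a local metric variable.
    total_cm = tf.get_variable(
        'total_confusion_matrix_per_class', [num_classes, num_classes],
        dtype=tf.float64, initializer=tf.zeros_initializer(), trainable=False,
        collections=[tf.GraphKeys.LOCAL_VARIABLES,
                     tf.GraphKeys.METRIC_VARIABLES])
    batch_cm = tf.confusion_matrix(
        labels, predictions, num_classes=num_classes,
        weights=weights, dtype=tf.float64)
    update_cm = tf.assign_add(total_cm, batch_cm)

    def _iou_from_cm(cm):
        tp = tf.diag_part(cm)
        fp = tf.reduce_sum(cm, axis=0) - tp
        fn = tf.reduce_sum(cm, axis=1) - tp
        denom = tp + fp + fn
        # Classes absent from both labels and predictions get IoU 0 here.
        return tf.where(denom > 0, tp / tf.maximum(denom, 1e-12),
                        tf.zeros_like(tp))

    return _iou_from_cm(total_cm), _iou_from_cm(update_cm)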
Example #6
def main(unused_argv):
    tf.logging.set_verbosity(tf.logging.INFO)
    # Get dataset-dependent information.
    dataset = segmentation_dataset.get_dataset(FLAGS.dataset,
                                               FLAGS.eval_split,
                                               dataset_dir=FLAGS.dataset_dir)

    tf.gfile.MakeDirs(FLAGS.eval_logdir)
    tf.logging.info('Evaluating on %s set', FLAGS.eval_split)

    with tf.Graph().as_default():
        samples = input_generator.get(dataset,
                                      FLAGS.eval_crop_size,
                                      FLAGS.eval_batch_size,
                                      min_resize_value=FLAGS.min_resize_value,
                                      max_resize_value=FLAGS.max_resize_value,
                                      resize_factor=FLAGS.resize_factor,
                                      dataset_split=FLAGS.eval_split,
                                      is_training=False,
                                      model_variant=FLAGS.model_variant)

        model_options = common.ModelOptions(
            outputs_to_num_classes={common.OUTPUT_TYPE: dataset.num_classes},
            crop_size=FLAGS.eval_crop_size,
            atrous_rates=FLAGS.atrous_rates,
            output_stride=FLAGS.output_stride)

        if tuple(FLAGS.eval_scales) == (1.0, ):
            tf.logging.info('Performing single-scale test.')
            predictions = model.predict_labels(
                samples[common.IMAGE],
                model_options,
                image_pyramid=FLAGS.image_pyramid)
        else:
            tf.logging.info('Performing multi-scale test.')
            predictions = model.predict_labels_multi_scale(
                samples[common.IMAGE],
                model_options=model_options,
                eval_scales=FLAGS.eval_scales,
                add_flipped_images=FLAGS.add_flipped_images)
        predictions = predictions[common.OUTPUT_TYPE]
        predictions = tf.reshape(predictions, shape=[-1])
        labels = tf.reshape(samples[common.LABEL], shape=[-1])
        weights = tf.to_float(tf.not_equal(labels, dataset.ignore_label))

        # Set ignore_label regions to label 0, because metrics.mean_iou requires
        # range of labels = [0, dataset.num_classes). Note the ignore_label regions
        # are not evaluated since the corresponding regions contain weights = 0.
        labels = tf.where(tf.equal(labels, dataset.ignore_label),
                          tf.zeros_like(labels), labels)

        predictions_tag = 'miou'
        for eval_scale in FLAGS.eval_scales:
            predictions_tag += '_' + str(eval_scale)
        if FLAGS.add_flipped_images:
            predictions_tag += '_flipped'

        # Define the evaluation metric.
        metric_map = {}
        # tf.metrics.mean_iou expects (labels, predictions, ...); use keyword
        # arguments to avoid swapping them.
        metric_map[predictions_tag] = tf.metrics.mean_iou(
            labels=labels,
            predictions=predictions,
            num_classes=dataset.num_classes,
            weights=weights)

        metrics_to_values, metrics_to_updates = (
            tf.contrib.metrics.aggregate_metric_map(metric_map))

        for metric_name, metric_value in six.iteritems(metrics_to_values):
            slim.summaries.add_scalar_summary(metric_value,
                                              metric_name,
                                              print_summary=True)

        num_batches = int(
            math.ceil(dataset.num_samples / float(FLAGS.eval_batch_size)))

        tf.logging.info('Eval num images %d', dataset.num_samples)
        tf.logging.info('Eval batch size %d and num batch %d',
                        FLAGS.eval_batch_size, num_batches)

        num_eval_iters = None
        if FLAGS.max_number_of_evaluations > 0:
            num_eval_iters = FLAGS.max_number_of_evaluations
        slim.evaluation.evaluation_loop(
            master=FLAGS.master,
            checkpoint_dir=FLAGS.checkpoint_dir,
            logdir=FLAGS.eval_logdir,
            num_evals=num_batches,
            eval_op=list(metrics_to_updates.values()),
            max_number_of_evaluations=num_eval_iters,
            eval_interval_secs=FLAGS.eval_interval_secs)
Example #7
def main(unused_argv):
    FLAGS.comb_dropout_keep_prob = 1.0
    FLAGS.image_keep_prob = 1.0
    FLAGS.elements_keep_prob = 1.0

    # Get dataset-dependent information.

    tf.gfile.MakeDirs(FLAGS.eval_logdir)
    tf.logging.info('Evaluating on %s set', FLAGS.split)

    with tf.Graph().as_default():
        samples = model_input.get_input_fn(FLAGS)()

        # Get model segmentation predictions.
        num_classes = model_input.dataset_descriptors[
            FLAGS.dataset].num_classes
        output_to_num_classes = model.get_output_to_num_classes(FLAGS)

        if tuple(FLAGS.eval_scales) == (1.0, ):
            tf.logging.info('Performing single-scale test.')
            predictions, probs = model.predict_labels(
                samples['image'],
                samples,
                FLAGS,
                outputs_to_num_classes=output_to_num_classes,
                image_pyramid=FLAGS.image_pyramid,
                merge_method=FLAGS.merge_method,
                atrous_rates=FLAGS.atrous_rates,
                add_image_level_feature=FLAGS.add_image_level_feature,
                aspp_with_batch_norm=FLAGS.aspp_with_batch_norm,
                aspp_with_separable_conv=FLAGS.aspp_with_separable_conv,
                multi_grid=FLAGS.multi_grid,
                depth_multiplier=FLAGS.depth_multiplier,
                output_stride=FLAGS.output_stride,
                decoder_output_stride=FLAGS.decoder_output_stride,
                decoder_use_separable_conv=FLAGS.decoder_use_separable_conv,
                crop_size=[FLAGS.image_size, FLAGS.image_size],
                logits_kernel_size=FLAGS.logits_kernel_size,
                model_variant=FLAGS.model_variant)
        else:
            tf.logging.info('Performing multi-scale test.')
            predictions, probs = model.predict_labels_multi_scale(
                samples['image'],
                samples,
                FLAGS,
                outputs_to_num_classes=output_to_num_classes,
                eval_scales=FLAGS.eval_scales,
                add_flipped_images=FLAGS.add_flipped_images,
                merge_method=FLAGS.merge_method,
                atrous_rates=FLAGS.atrous_rates,
                add_image_level_feature=FLAGS.add_image_level_feature,
                aspp_with_batch_norm=FLAGS.aspp_with_batch_norm,
                aspp_with_separable_conv=FLAGS.aspp_with_separable_conv,
                multi_grid=FLAGS.multi_grid,
                depth_multiplier=FLAGS.depth_multiplier,
                output_stride=FLAGS.output_stride,
                decoder_output_stride=FLAGS.decoder_output_stride,
                decoder_use_separable_conv=FLAGS.decoder_use_separable_conv,
                crop_size=[FLAGS.image_size, FLAGS.image_size],
                logits_kernel_size=FLAGS.logits_kernel_size,
                model_variant=FLAGS.model_variant)

        metric_map = {}
        for output in output_to_num_classes:
            output_predictions = predictions[output]
            output_probs = probs[output]
            if output == 'segment':
                output_predictions = tf.expand_dims(output_predictions, 3)
                if num_classes == 2:
                    labels = samples['label']

                    iou, weights = model.foreground_iou(
                        labels, output_predictions, FLAGS)
                    soft_iou, _ = model.foreground_iou(
                        labels, output_probs[:, :, :, 1:2], FLAGS)

                    metric_map['mIOU'] = tf.metrics.mean(iou)
                    metric_map['soft_mIOU'] = tf.metrics.mean(soft_iou)

                    high_prob_overlaps = calc_high_prob_overlaps(
                        labels, output_probs, weights)
                    metric_map['highestOverlaps'] = tf.metrics.mean(
                        high_prob_overlaps)

                    output_probs *= weights

                else:
                    output_predictions = tf.reshape(output_predictions,
                                                    shape=[-1])
                    labels = tf.reshape(samples['label'], shape=[-1])
                    weights = tf.to_float(
                        tf.not_equal(
                            labels, model_input.dataset_descriptors[
                                FLAGS.dataset].ignore_label))

                    # Set ignore_label regions to label 0, because metrics.mean_iou
                    # requires range of labels=[0, dataset.num_classes).
                    # Note the ignore_label regions are not evaluated since
                    # the corresponding regions contain weights=0.
                    labels = tf.where(
                        tf.equal(
                            labels, model_input.dataset_descriptors[
                                FLAGS.dataset].ignore_label),
                        tf.zeros_like(labels), labels)

                    predictions_tag = 'mIOU'
                    for eval_scale in FLAGS.eval_scales:
                        predictions_tag += '_' + str(eval_scale)
                    if FLAGS.add_flipped_images:
                        predictions_tag += '_flipped'

                    # Define the evaluation metric.
                    metric_map[predictions_tag] = slim.metrics.mean_iou(
                        output_predictions,
                        labels,
                        num_classes,
                        weights=weights)

                def label_summary(labels, weights, name):
                    tf.summary.image(
                        name,
                        tf.reshape(
                            tf.cast(
                                tf.to_float(labels * 255) /
                                tf.to_float(num_classes), tf.uint8) *
                            tf.cast(weights, tf.uint8),
                            [-1, FLAGS.image_size, FLAGS.image_size, 1]), 8)

                label_summary(labels, weights, 'label')
                label_summary(output_predictions, weights,
                              'output_predictions')
                tf.summary.image('logits',
                                 tf.expand_dims(output_probs[:, :, :, 1], 3))

            elif output == 'regression':
                labels = samples['label']
                ignore_mask = model.get_ignore_mask(labels, FLAGS)

                accurate = calc_accuracy_in_box(labels, output_probs,
                                                ignore_mask)
                metric_map['inBoxAccuracy'] = tf.metrics.mean(accurate)

        tf.summary.image('image', samples['image'], 8)

        metrics_to_values, metrics_to_updates = slim.metrics.aggregate_metric_map(
            metric_map)

        for metric_name, metric_value in metrics_to_values.items():
            metric_value = tf.Print(metric_value, [metric_value], metric_name)
            tf.summary.scalar(metric_name, metric_value)

        num_batches = int(
            math.ceil(FLAGS.num_samples / float(FLAGS.batch_size)))

        tf.logging.info('Eval num images %d', FLAGS.num_samples)
        tf.logging.info('Eval batch size %d and num batch %d',
                        FLAGS.batch_size, num_batches)

        slim.evaluation.evaluation_loop(
            master='',
            checkpoint_dir=FLAGS.checkpoint_dir,
            logdir=FLAGS.eval_logdir,
            num_evals=num_batches,
            eval_op=list(metrics_to_updates.values()),
            summary_op=tf.summary.merge_all(),
            max_number_of_evaluations=None,
            eval_interval_secs=FLAGS.eval_interval_secs)
Example #8
def main(unused_argv):
    # Get dataset-dependent information.
    # Prepare for visualization.
    tf.gfile.MakeDirs(FLAGS.vis_logdir)
    save_dir = os.path.join(FLAGS.vis_logdir, _SEMANTIC_PREDICTION_SAVE_FOLDER)
    tf.gfile.MakeDirs(save_dir)
    raw_save_dir = os.path.join(FLAGS.vis_logdir,
                                _RAW_SEMANTIC_PREDICTION_SAVE_FOLDER)
    tf.gfile.MakeDirs(raw_save_dir)
    num_vis_examples = FLAGS.num_vis_examples

    print('Visualizing on %s set' % FLAGS.split)

    g = tf.Graph()
    with g.as_default():
        samples = model_input.get_input_fn(FLAGS)()
        outputs_to_num_classes = model.get_output_to_num_classes(FLAGS)

        # Get model segmentation predictions.
        if tuple(FLAGS.eval_scales) == (1.0, ):
            tf.logging.info('Performing single-scale test.')
            predictions, probs = model.predict_labels(
                samples['image'],
                samples,
                FLAGS,
                outputs_to_num_classes=outputs_to_num_classes,
                image_pyramid=FLAGS.image_pyramid,
                merge_method=FLAGS.merge_method,
                atrous_rates=FLAGS.atrous_rates,
                add_image_level_feature=FLAGS.add_image_level_feature,
                aspp_with_batch_norm=FLAGS.aspp_with_batch_norm,
                aspp_with_separable_conv=FLAGS.aspp_with_separable_conv,
                multi_grid=FLAGS.multi_grid,
                depth_multiplier=FLAGS.depth_multiplier,
                output_stride=FLAGS.output_stride,
                decoder_output_stride=FLAGS.decoder_output_stride,
                decoder_use_separable_conv=FLAGS.decoder_use_separable_conv,
                crop_size=[FLAGS.image_size, FLAGS.image_size],
                logits_kernel_size=FLAGS.logits_kernel_size,
                model_variant=FLAGS.model_variant)
        else:
            tf.logging.info('Performing multi-scale test.')
            predictions, probs = model.predict_labels_multi_scale(
                samples['image'],
                samples,
                FLAGS,
                outputs_to_num_classes=outputs_to_num_classes,
                eval_scales=FLAGS.eval_scales,
                add_flipped_images=FLAGS.add_flipped_images,
                merge_method=FLAGS.merge_method,
                atrous_rates=FLAGS.atrous_rates,
                add_image_level_feature=FLAGS.add_image_level_feature,
                aspp_with_batch_norm=FLAGS.aspp_with_batch_norm,
                aspp_with_separable_conv=FLAGS.aspp_with_separable_conv,
                multi_grid=FLAGS.multi_grid,
                depth_multiplier=FLAGS.depth_multiplier,
                output_stride=FLAGS.output_stride,
                decoder_output_stride=FLAGS.decoder_output_stride,
                decoder_use_separable_conv=FLAGS.decoder_use_separable_conv,
                crop_size=[FLAGS.image_size, FLAGS.image_size],
                logits_kernel_size=FLAGS.logits_kernel_size,
                model_variant=FLAGS.model_variant)

        if FLAGS.output_mode == 'segment':
            predictions = tf.squeeze(
                tf.cast(predictions[FLAGS.output_mode], tf.int32))
            probs = probs[FLAGS.output_mode]

            labels = tf.squeeze(tf.cast(samples['label'], tf.int32))
            weights = tf.cast(
                tf.not_equal(
                    labels, model_input.dataset_descriptors[
                        FLAGS.dataset].ignore_label), tf.int32)

            labels *= weights
            predictions *= weights

            tf.train.get_or_create_global_step()
            saver = tf.train.Saver(contrib_slim.get_variables_to_restore())
            sv = tf.train.Supervisor(graph=g,
                                     logdir=FLAGS.vis_logdir,
                                     init_op=tf.global_variables_initializer(),
                                     summary_op=None,
                                     summary_writer=None,
                                     global_step=None,
                                     saver=saver)
            num_batches = int(
                math.ceil(num_vis_examples / float(FLAGS.batch_size)))
            last_checkpoint = None

            # Infinite loop to visualize the results whenever a new checkpoint
            # is created.
            while True:
                last_checkpoint = contrib_slim.evaluation.wait_for_new_checkpoint(
                    FLAGS.checkpoint_dir, last_checkpoint)
                start = time.time()
                print('Starting visualization at ' +
                      time.strftime('%Y-%m-%d-%H:%M:%S', time.gmtime()))
                print('Visualizing with model', last_checkpoint)

                with sv.managed_session(FLAGS.master,
                                        start_standard_services=False) as sess:
                    # sv.start_queue_runners(sess)
                    sv.saver.restore(sess, last_checkpoint)

                    image_id_offset = 0
                    refs = []
                    for batch in range(num_batches):
                        print('Visualizing batch', batch + 1, num_batches)
                        refs.extend(
                            _process_batch(sess=sess,
                                           samples=samples,
                                           semantic_predictions=predictions,
                                           labels=labels,
                                           image_id_offset=image_id_offset,
                                           save_dir=save_dir))
                        image_id_offset += FLAGS.batch_size

                print('Finished visualization at ' +
                      time.strftime('%Y-%m-%d-%H:%M:%S', time.gmtime()))
                time_to_next_eval = start + FLAGS.eval_interval_secs - time.time()
                if time_to_next_eval > 0:
                    time.sleep(time_to_next_eval)
Example #9
    FLAGS.dense_prediction_cell_json = (
        './core/dense_prediction_cell_branch5_top1_cityscapes.json'
        if USE_DPC else '')
    chkpt_path = CHECKPOINT_PATH

    model_options = common.ModelOptions(
        outputs_to_num_classes=outputs_to_num_classes,
        crop_size=input_size[1:3],
        atrous_rates=None,
        output_stride=OUTPUT_STRIDE)

    g = tf.Graph()
    with g.as_default():
        with tf.Session(graph=g) as sess:
            inputs = tf.placeholder(
                tf.float32, input_size, name=input_tensor_name)
            outputs_to_scales_to_logits = model.predict_labels(
                inputs,
                model_options=model_options)
            predictions = tf.cast(
                outputs_to_scales_to_logits[common.OUTPUT_TYPE], tf.int32)
            output_tensor_name = predictions.name.split(':')[0]

            sess.run(tf.global_variables_initializer())
            if chkpt_path:
                saver = tf.train.Saver()
                saver.restore(sess, tf.train.latest_checkpoint(chkpt_path))

            constant_graph = tf.graph_util.convert_variables_to_constants(
                sess,  # The session is used to retrieve the weights
                # The graph_def is used to retrieve the nodes
                tf.get_default_graph().as_graph_def(),
                # The output node names are used to select the useful nodes
                [output_tensor_name])
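            # Continuation sketch (assumed): the scraped example is cut off
            # here; a typical next step serializes the frozen GraphDef to a
            # placeholder path for later inference.
            with tf.gfile.GFile('/tmp/frozen_inference_graph.pb', 'wb') as f:
                f.write(constant_graph.SerializeToString())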
Example #10
def main(unused_argv):
    tf.logging.set_verbosity(tf.logging.INFO)

    dataset = data_generator.Dataset(
        dataset_name=FLAGS.dataset,
        split_name=FLAGS.eval_split,
        dataset_dir=FLAGS.dataset_dir,
        batch_size=FLAGS.eval_batch_size,
        crop_size=[int(sz) for sz in FLAGS.eval_crop_size],
        min_resize_value=FLAGS.min_resize_value,
        max_resize_value=FLAGS.max_resize_value,
        resize_factor=FLAGS.resize_factor,
        model_variant=FLAGS.model_variant,
        num_readers=2,
        is_training=False,
        should_shuffle=False,
        should_repeat=False)

    tf.gfile.MakeDirs(FLAGS.eval_logdir)
    tf.logging.info('Evaluating on %s set', FLAGS.eval_split)

    with tf.Graph().as_default():
        samples = dataset.get_one_shot_iterator().get_next()

        model_options = common.ModelOptions(
            outputs_to_num_classes={
                common.OUTPUT_TYPE: dataset.num_of_classes
            },
            crop_size=[int(sz) for sz in FLAGS.eval_crop_size],
            atrous_rates=FLAGS.atrous_rates,
            output_stride=FLAGS.output_stride)

        # Set shape in order for tf.contrib.tfprof.model_analyzer to work properly.
        samples[common.IMAGE].set_shape([
            FLAGS.eval_batch_size,
            int(FLAGS.eval_crop_size[0]),
            int(FLAGS.eval_crop_size[1]), 3
        ])
        if tuple(FLAGS.eval_scales) == (1.0, ):
            tf.logging.info('Performing single-scale test.')
            predictions = model.predict_labels(
                samples[common.IMAGE],
                model_options,
                image_pyramid=FLAGS.image_pyramid)
        else:
            tf.logging.info('Performing multi-scale test.')
            if FLAGS.quantize_delay_step >= 0:
                raise ValueError(
                    'Quantize mode is not supported with multi-scale test.')

            predictions = model.predict_labels_multi_scale(
                samples[common.IMAGE],
                model_options=model_options,
                eval_scales=FLAGS.eval_scales,
                add_flipped_images=FLAGS.add_flipped_images)
        predictions = predictions[common.OUTPUT_TYPE]
        predictions = tf.reshape(predictions, shape=[-1])
        labels = tf.reshape(samples[common.LABEL], shape=[-1])
        weights = tf.to_float(tf.not_equal(labels, dataset.ignore_label))

        # Set ignore_label regions to label 0, because metrics.mean_iou requires
        # range of labels = [0, dataset.num_classes). Note the ignore_label regions
        # are not evaluated since the corresponding regions contain weights = 0.
        labels = tf.where(tf.equal(labels, dataset.ignore_label),
                          tf.zeros_like(labels), labels)

        predictions_tag = 'miou'
        for eval_scale in FLAGS.eval_scales:
            predictions_tag += '_' + str(eval_scale)
        if FLAGS.add_flipped_images:
            predictions_tag += '_flipped'

        # Define the evaluation metric.
        metric_map = {}
        num_classes = dataset.num_of_classes
        metric_map['eval/%s_overall' % predictions_tag] = tf.metrics.mean_iou(
            labels=labels,
            predictions=predictions,
            num_classes=num_classes,
            weights=weights)
        # IoU for each class.
        one_hot_predictions = tf.one_hot(predictions, num_classes)
        one_hot_predictions = tf.reshape(one_hot_predictions,
                                         [-1, num_classes])
        one_hot_labels = tf.one_hot(labels, num_classes)
        one_hot_labels = tf.reshape(one_hot_labels, [-1, num_classes])
        for c in range(num_classes):
            predictions_tag_c = '%s_class_%d' % (predictions_tag, c)
            tp, tp_op = tf.metrics.true_positives(
                labels=one_hot_labels[:, c],
                predictions=one_hot_predictions[:, c],
                weights=weights)
            fp, fp_op = tf.metrics.false_positives(
                labels=one_hot_labels[:, c],
                predictions=one_hot_predictions[:, c],
                weights=weights)
            fn, fn_op = tf.metrics.false_negatives(
                labels=one_hot_labels[:, c],
                predictions=one_hot_predictions[:, c],
                weights=weights)
            tp_fp_fn_op = tf.group(tp_op, fp_op, fn_op)
            iou = tf.where(tf.greater(tp + fn, 0.0), tp / (tp + fn + fp),
                           tf.constant(np.NaN))
            metric_map['eval/%s' % predictions_tag_c] = (iou, tp_fp_fn_op)

        (metrics_to_values,
         metrics_to_updates) = contrib_metrics.aggregate_metric_map(metric_map)

        summary_ops = []
        for metric_name, metric_value in six.iteritems(metrics_to_values):
            op = tf.summary.scalar(metric_name, metric_value)
            op = tf.Print(op, [metric_value], metric_name)
            summary_ops.append(op)

        summary_op = tf.summary.merge(summary_ops)
        summary_hook = contrib_training.SummaryAtEndHook(
            log_dir=FLAGS.eval_logdir, summary_op=summary_op)
        hooks = [summary_hook]

        num_eval_iters = None
        if FLAGS.max_number_of_evaluations > 0:
            num_eval_iters = FLAGS.max_number_of_evaluations

        if FLAGS.quantize_delay_step >= 0:
            contrib_quantize.create_eval_graph()

        contrib_tfprof.model_analyzer.print_model_analysis(
            tf.get_default_graph(),
            tfprof_options=contrib_tfprof.model_analyzer.
            TRAINABLE_VARS_PARAMS_STAT_OPTIONS)
        contrib_tfprof.model_analyzer.print_model_analysis(
            tf.get_default_graph(),
            tfprof_options=contrib_tfprof.model_analyzer.FLOAT_OPS_OPTIONS)
        contrib_training.evaluate_repeatedly(
            checkpoint_dir=FLAGS.checkpoint_dir,
            master=FLAGS.master,
            eval_ops=list(metrics_to_updates.values()),
            max_number_of_evaluations=num_eval_iters,
            hooks=hooks,
            eval_interval_secs=FLAGS.eval_interval_secs)
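
The per-class IoU assembled above from streaming true positives, false positives, and false negatives is the standard confusion-matrix identity IoU_c = TP_c / (TP_c + FP_c + FN_c). A quick NumPy check of the same computation, independent of the TF streaming metrics:

import numpy as np


def per_class_iou(labels, predictions, num_classes):
    """IoU per class from flat integer label/prediction arrays."""
    cm = np.zeros((num_classes, num_classes), dtype=np.int64)
    np.add.at(cm, (labels, predictions), 1)  # rows: labels, cols: predictions
    tp = np.diag(cm).astype(np.float64)
    fp = cm.sum(axis=0) - tp
    fn = cm.sum(axis=1) - tp
    denom = tp + fp + fn
    # Guard the division; classes with an empty denominator get NaN.
    return np.where(denom > 0, tp / np.maximum(denom, 1), np.nan)


labels = np.array([0, 0, 1, 1, 2, 2])
predictions = np.array([0, 1, 1, 1, 2, 0])
print(per_class_iou(labels, predictions, 3))  # [0.333..., 0.666..., 0.5]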