Example #1
def _process_batch(sess, original_images, semantic_predictions, image_names,
                   image_heights, image_widths, save_dir):
    (original_images, semantic_predictions, image_names, image_heights,
     image_widths) = sess.run([
         original_images, semantic_predictions, image_names, image_heights,
         image_widths
     ])

    num_image = semantic_predictions.shape[0]
    # Note: this variant only processes the first image of the batch.
    i = 0
    image_height = np.squeeze(image_heights[i])
    image_width = np.squeeze(image_widths[i])
    original_image = np.squeeze(original_images[i])
    semantic_prediction = np.squeeze(semantic_predictions[i])
    crop_semantic_prediction = semantic_prediction[:image_height, :image_width]

    save_annotation.save_annotation(original_image,
                                    save_dir,
                                    _IMAGE_FORMAT.format(
                                        image_names[i].decode("utf-8")),
                                    add_colormap=False)

    save_annotation.save_annotation(crop_semantic_prediction,
                                    save_dir,
                                    _PREDICTION_FORMAT.format(
                                        image_names[i].decode("utf-8")),
                                    add_colormap=True,
                                    colormap_type=FLAGS.colormap_type)
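
Each variant below relies on the same padded-batch convention: the network emits fixed-size outputs, and slicing with the recorded height and width recovers the valid region. A minimal, self-contained sketch of that crop step (sizes here are illustrative, not from the snippet):

import numpy as np

padded_prediction = np.zeros((513, 513), dtype=np.uint8)  # fixed-size network output
image_height, image_width = 375, 500                      # true image size

# Slicing keeps the valid top-left region; padding below and to the right is dropped.
crop = padded_prediction[:image_height, :image_width]
assert crop.shape == (image_height, image_width)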
Example #2
def _process_batch(sess, original_images, semantic_predictions, image_names,
                   image_heights, image_widths, image_id_offset, save_dir,
                   raw_save_dir, train_id_to_eval_id=None):
  """Evaluates one single batch qualitatively.

  Args:
    sess: TensorFlow session.
    original_images: One batch of original images.
    semantic_predictions: One batch of semantic segmentation predictions.
    image_names: Image names.
    image_heights: Image heights.
    image_widths: Image widths.
    image_id_offset: Image id offset for indexing images.
    save_dir: The directory where the predictions will be saved.
    raw_save_dir: The directory where the raw predictions will be saved.
    train_id_to_eval_id: A list mapping from train id to eval id.
  """
  (original_images,
   semantic_predictions,
   image_names,
   image_heights,
   image_widths) = sess.run([original_images, semantic_predictions,
                             image_names, image_heights, image_widths])

  num_image = semantic_predictions.shape[0]
  for i in range(num_image):
    image_height = np.squeeze(image_heights[i])
    image_width = np.squeeze(image_widths[i])
    original_image = np.squeeze(original_images[i])
    semantic_prediction = np.squeeze(semantic_predictions[i])
    crop_semantic_prediction = semantic_prediction[:image_height, :image_width]

    # Save image.
    # save_annotation.save_annotation(
    #     original_image, save_dir, _IMAGE_FORMAT % (image_id_offset + i),
    #     add_colormap=False)

    # Save prediction.
    # print("*"*20)
    # print(crop_semantic_prediction)
    # print("*"*20)
    # save_annotation.save_annotation(
    #     crop_semantic_prediction, save_dir,
    #     _PREDICTION_FORMAT % (image_id_offset + i), add_colormap=True,
    #     colormap_type=FLAGS.colormap_type)
    save_annotation.save_annotation_on_original_image(
        [original_image, crop_semantic_prediction],
        save_dir, _PREDICTION_FORMAT % (image_id_offset + i),
        FLAGS.colormap_type)
    if FLAGS.also_save_raw_predictions:
      image_filename = os.path.basename(image_names[i])

      if train_id_to_eval_id is not None:
        crop_semantic_prediction = _convert_train_id_to_eval_id(
            crop_semantic_prediction,
            train_id_to_eval_id)
      save_annotation.save_annotation(
          crop_semantic_prediction, raw_save_dir, image_filename,
          add_colormap=False)
Example #3
def _process_batch(sess,
                   original_images,
                   semantic_predictions,
                   image_names,
                   image_heights,
                   image_widths,
                   image_id_offset,
                   save_dir,
                   raw_save_dir,
                   train_id_to_eval_id=None):
    """Evaluates one single batch qualitatively.

  Args:
    sess: TensorFlow session.
    original_images: One batch of original images.
    semantic_predictions: One batch of semantic segmentation predictions.
    image_names: Image names.
    image_heights: Image heights.
    image_widths: Image widths.
    image_id_offset: Image id offset for indexing images.
    save_dir: The directory where the predictions will be saved.
    raw_save_dir: The directory where the raw predictions will be saved.
    train_id_to_eval_id: A list mapping from train id to eval id.
  """
    (original_images, semantic_predictions, image_names, image_heights,
     image_widths) = sess.run([
         original_images, semantic_predictions, image_names, image_heights,
         image_widths
     ])

    num_image = semantic_predictions.shape[0]
    # Debugging pause: waits for a keypress before processing the batch.
    input('num_image: %d' % num_image)
    for i in range(num_image):
        image_height = np.squeeze(image_heights[i])
        image_width = np.squeeze(image_widths[i])
        original_image = np.squeeze(original_images[i])
        semantic_prediction = np.squeeze(semantic_predictions[i])
        crop_semantic_prediction = semantic_prediction[:image_height, :image_width]

        input('save_dir: %s' % save_dir)
        # Save image.
        print(image_names[i].decode("utf-8"))
        save_annotation.save_annotation(original_image,
                                        save_dir,
                                        _IMAGE_FORMAT.format(
                                            image_names[i].decode("utf-8")),
                                        add_colormap=False)

        # Save prediction.
        save_annotation.save_annotation(crop_semantic_prediction,
                                        save_dir,
                                        _PREDICTION_FORMAT.format(
                                            image_names[i].decode("utf-8")),
                                        add_colormap=True,
                                        colormap_type=FLAGS.colormap_type)
Example #4
    def predict(self, input_npy, save_path=None):
        start = time.time()
        semantic_predictions_value, logits_predictions_value = self.sess.run(
            [self.semantic_predictions, self.logits],
            feed_dict={self.image_placeholder: input_npy}
        )
        print("Time needed: {:.2f}s".format(time.time()-start))
        if save_path is not None:
            save_annotation.save_annotation(semantic_predictions_value, os.path.dirname(save_path),
                                            os.path.splitext(os.path.basename(save_path))[0], add_colormap=False)

        return semantic_predictions_value, logits_predictions_value
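
A hypothetical call site for the predict() method above; the class name and constructor are assumptions, not part of the snippet:

import numpy as np

# model = SegmentationModel(frozen_graph_path)  # assumed constructor, not shown above
dummy_input = np.random.randint(0, 255, size=(1, 513, 513, 3), dtype=np.uint8)
# preds, logits = model.predict(dummy_input, save_path='out/pred.png')
# preds holds per-pixel class ids (the argmax); logits holds the raw class scores.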
Example #5
def _process_batch(sess,
                   original_images,
                   resize_images,
                   image_names,
                   mask,
                   image_heights,
                   image_widths,
                   image_id_offset,
                   save_dir,
                   raw_save_dir,
                   train_id_to_eval_id=None):
    """Evaluates one single batch qualitatively.

  Args:
    sess: TensorFlow session.
    original_images: One batch of original images.
    semantic_predictions: One batch of semantic segmentation predictions.
    image_names: Image names.
    image_heights: Image heights.
    image_widths: Image widths.
    image_id_offset: Image id offset for indexing images.
    save_dir: The directory where the predictions will be saved.
    raw_save_dir: The directory where the raw predictions will be saved.
    train_id_to_eval_id: A list mapping from train id to eval id.
  """
    (original_images, resize_images, image_names, mask, image_heights,
     image_widths) = sess.run([
         original_images, resize_images, image_names, mask, image_heights,
         image_widths
     ])

    print(original_images.shape)
    print(mask.shape)

    # Save image.
    save_annotation.save_annotation(resize_images,
                                    save_dir,
                                    _IMAGE_FORMAT % (image_id_offset),
                                    add_colormap=False)

    # Save prediction.
    save_annotation.save_annotation(mask,
                                    save_dir,
                                    _PREDICTION_FORMAT % (image_id_offset),
                                    add_colormap=True,
                                    colormap_type=FLAGS.colormap_type)

    write_file(
        os.path.join(save_dir,
                     _PREDICTION_FORMAT % (image_id_offset) + ".csv"), mask)
Example #6
def _process_batch(sess, original_images, semantic_predictions, image_names,
                   image_heights, image_widths, image_id_offset, save_dir,
                   raw_save_dir, train_id_to_eval_id=None):
  """Evaluates one single batch qualitatively.

  Args:
    sess: TensorFlow session.
    original_images: One batch of original images.
    semantic_predictions: One batch of semantic segmentation predictions.
    image_names: Image names.
    image_heights: Image heights.
    image_widths: Image widths.
    image_id_offset: Image id offset for indexing images.
    save_dir: The directory where the predictions will be saved.
    raw_save_dir: The directory where the raw predictions will be saved.
    train_id_to_eval_id: A list mapping from train id to eval id.
  """
  (original_images,
   semantic_predictions,
   image_names,
   image_heights,
   image_widths) = sess.run([original_images, semantic_predictions,
                             image_names, image_heights, image_widths])

  num_image = semantic_predictions.shape[0]
  for i in range(num_image):
    image_height = np.squeeze(image_heights[i])
    image_width = np.squeeze(image_widths[i])
    original_image = np.squeeze(original_images[i])
    semantic_prediction = np.squeeze(semantic_predictions[i])
    crop_semantic_prediction = semantic_prediction[:image_height, :image_width]

    # Save image.
    save_annotation.save_annotation(
        original_image, save_dir, _IMAGE_FORMAT % (image_id_offset + i),
        add_colormap=False)

    # Save prediction.
    save_annotation.save_annotation(
        crop_semantic_prediction, save_dir,
        _PREDICTION_FORMAT % (image_id_offset + i), add_colormap=True,
        colormap_type=FLAGS.colormap_type)

    if FLAGS.also_save_raw_predictions:
      image_filename = os.path.basename(image_names[i])

      if train_id_to_eval_id is not None:
        crop_semantic_prediction = _convert_train_id_to_eval_id(
            crop_semantic_prediction,
            train_id_to_eval_id)
      save_annotation.save_annotation(
          crop_semantic_prediction, raw_save_dir, image_filename,
          add_colormap=False)
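
_convert_train_id_to_eval_id is called in several of these variants but never shown. A plausible vectorized implementation, assuming train_id_to_eval_id is a list indexed by train id (a sketch, not the original helper):

import numpy as np

def _convert_train_id_to_eval_id(prediction, train_id_to_eval_id):
    # Treat the list as a lookup table: each train id in the prediction map
    # is replaced by its corresponding eval id in one indexing step.
    lut = np.asarray(train_id_to_eval_id, dtype=prediction.dtype)
    return lut[prediction]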
Example #7
def _process_batch(sess, original_images, semantic_predictions,
                   instance_predictions, regression_predictions,
                   panoptic_prediction, image_names, image_heights,
                   image_widths, image_id_offset, save_dir, instance_save_dir,
                   instance_seg_save_dir, panoptic_save_dir,
                   instance_offset_save_dir):
    """Evaluates one single batch qualitatively.

  Args:
    sess: TensorFlow session.
    original_images: One batch of original images.
    semantic_predictions: One batch of semantic segmentation predictions.
    instance_predictions: One batch of instance predictions.
    image_names: Image names.
    image_heights: Image heights.
    image_widths: Image widths.
    image_id_offset: Image id offset for indexing images.
    save_dir: The directory where the predictions will be saved.
    instance_save_dir : The directory where the instance predictions will be saved.
    raw_save_dir: The directory where the raw predictions will be saved.
    train_id_to_eval_id: A list mapping from train id to eval id.
  """
    (original_images, semantic_predictions, instance_predictions,
     regression_predictions, instance_segmentation, image_names, image_heights,
     image_widths) = sess.run([
         original_images, semantic_predictions, instance_predictions,
         regression_predictions, panoptic_prediction, image_names,
         image_heights, image_widths
     ])

    num_image = semantic_predictions.shape[0]
    for i in range(num_image):
        image_height = np.squeeze(image_heights[i])
        image_width = np.squeeze(image_widths[i])
        original_image = np.squeeze(original_images[i])
        semantic_prediction = np.squeeze(semantic_predictions[i])
        instance_predictions = np.squeeze(instance_predictions[i])
        regression_predictions = np.squeeze(regression_predictions[i])

        crop_semantic_prediction = semantic_prediction[:image_height, :image_width]
        crop_instance_prediction = instance_predictions[:image_height, :image_width]
        crop_regression_prediction = regression_predictions[:image_height, :image_width, :]

        instance_segmentation = np.squeeze(instance_segmentation)
        unique_elements = np.unique(instance_segmentation)

        instance_segmentation_scaled = np.array(instance_segmentation) * (
            255 // len(unique_elements))

        #################### Heatmaps ############################

        crop_instance_prediction = np.multiply(
            crop_instance_prediction / np.amax(crop_instance_prediction), 255)
        #crop_instance_prediction_mask = np.greater_equal(crop_instance_prediction, 100)
        crop_instance_prediction_mask = np.greater_equal(
            crop_instance_prediction, 10)
        crop_instance_prediction = crop_instance_prediction * crop_instance_prediction_mask

        mask_single_channel = np.not_equal(crop_instance_prediction, 0)
        mask_rgb = np.dstack(
            (mask_single_channel, mask_single_channel, mask_single_channel))

        instance_heatmap = cv2.applyColorMap(
            crop_instance_prediction.astype('uint8'), cv2.COLORMAP_HSV)
        #instance_heatmap = Image.blend(Image.fromarray(original_image), Image.fromarray(instance_heatmap), 0.2)
        instance_heatmap = Image.fromarray(
            np.where(mask_rgb, instance_heatmap, original_image))

        ##################### Offset Vectors Prediction #########################

        red, green = np.dsplit(crop_regression_prediction, 2)
        blue = np.zeros_like(red)
        crop_regression_prediction = np.dstack((red, green, blue))
        crop_regression_prediction = np.multiply(
            np.divide(crop_regression_prediction,
                      np.amax(crop_regression_prediction)), 255)
        ##########  VIS PANOPTIC OUTPUT ##################

        inst_color = cv2.applyColorMap(
            instance_segmentation_scaled.astype('uint8'), cv2.COLORMAP_JET)
        instance_segmentation_coloured = Image.blend(
            Image.fromarray(original_image), Image.fromarray(inst_color), 0.4)

        # For creating boundaries around instances:
        # add a boundary ring to the image.
        colormap_type = FLAGS.colormap_type
        instance_boundry = np.zeros_like(semantic_prediction)
        instances = np.delete(unique_elements, 0)

        # Use a distinct loop variable: reusing `i` would clobber the batch
        # index still needed by the save calls below.
        for index, instance_id in enumerate(instances):
            local_instance_mask = instance_segmentation == instance_id
            kernel = np.ones((5, 5), np.uint8)
            dilation = cv2.dilate(local_instance_mask.astype('uint8'),
                                  kernel,
                                  iterations=1)
            erosion = cv2.erode(local_instance_mask.astype('uint8'),
                                kernel,
                                iterations=1)
            boundry = (dilation - erosion) * 255
            instance_boundry += boundry

        colored_label = get_dataset_colormap.label_to_color_image(
            semantic_prediction.astype('uint8'), colormap_type)
        colored_label = colored_label + np.dstack(
            (instance_boundry, instance_boundry, instance_boundry))
        colored_label = Image.fromarray(colored_label.astype(dtype=np.uint8))

        panoptic_output = Image.blend(Image.fromarray(original_image),
                                      colored_label, 0.7)

        # Save image.
        save_annotation.save_annotation(original_image,
                                        save_dir,
                                        _IMAGE_FORMAT % (image_id_offset + i),
                                        add_colormap=False)

        # Save instance heatmap prediction.
        save_annotation.save_annotation_heatmaps(
            instance_heatmap,
            instance_save_dir,
            _PREDICTION_FORMAT % (image_id_offset + i),
            scale_values=False,
            add_colormap=False,
            colormap_type=FLAGS.colormap_type)

        # Save regression prediction.
        save_annotation.save_annotation_instance_segmentation(
            instance_segmentation_coloured,
            instance_seg_save_dir,
            _PREDICTION_FORMAT % (image_id_offset + i),
            normalize_values=False,
            add_colormap=False,
            colormap_type=FLAGS.colormap_type)

        # Save prediction.
        save_annotation.save_annotation_overlayed(
            crop_semantic_prediction,
            original_image,
            save_dir,
            _PREDICTION_FORMAT % (image_id_offset + i),
            add_colormap=True,
            colormap_type=FLAGS.colormap_type)

        # Save panoptic prediction.
        save_annotation.save_annotation_panoptic(
            panoptic_output,
            panoptic_save_dir,
            _PREDICTION_FORMAT % (image_id_offset + i),
            add_colormap=False,
            colormap_type=FLAGS.colormap_type)

        # Save instance_segmentation prediction.
        save_annotation.save_annotation_offset_vectors(
            crop_regression_prediction, instance_offset_save_dir,
            _PREDICTION_FORMAT % (image_id_offset + i))
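
The boundary pass in Example #7 computes a morphological gradient per instance: dilation minus erosion yields a thin ring around each mask. A standalone sketch with synthetic data:

import numpy as np
import cv2

instance_mask = np.zeros((64, 64), dtype=np.uint8)
instance_mask[16:48, 16:48] = 1                  # one square instance

kernel = np.ones((5, 5), np.uint8)
dilation = cv2.dilate(instance_mask, kernel, iterations=1)
erosion = cv2.erode(instance_mask, kernel, iterations=1)
boundary = (dilation - erosion) * 255            # ring straddling the mask edge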
Example #8
def _process_batch(sess, original_images, labels, semantic_predictions, image_names,
                   image_heights, image_widths, perf_metrics, image_id_offset, save_dir,
                   raw_save_dir, train_id_to_eval_id=None):
  """Evaluates one single batch qualitatively.

  Args:
    sess: TensorFlow session.
    original_images: One batch of original images.
    semantic_predictions: One batch of semantic segmentation predictions.
    image_names: Image names.
    image_heights: Image heights.
    image_widths: Image widths.
    image_id_offset: Image id offset for indexing images.
    save_dir: The directory where the predictions will be saved.
    raw_save_dir: The directory where the raw predictions will be saved.
    train_id_to_eval_id: A list mapping from train id to eval id.
  """

  global perf_all
  global filename_list

  (original_images,
   labels,
   semantic_predictions,
   image_names,
   image_heights,
   image_widths,
   m_iou,
   m_accu,
   accu) = sess.run([original_images, labels, semantic_predictions,
                     image_names, image_heights, image_widths, *perf_metrics])

  print(m_iou, m_accu, accu)

  num_image = semantic_predictions.shape[0]
  for i in range(num_image):
    name = image_names[i].decode('utf-8')
    idx = filename_list.index(name)
    perf_all[idx, 0] = m_iou
    perf_all[idx, 1] = m_accu
    perf_all[idx, 2] = accu
    idx += 1

    image_height = np.squeeze(image_heights[i])
    image_width = np.squeeze(image_widths[i])
    original_image = np.squeeze(original_images[i])
    # label = np.squeeze(labels[i])
    semantic_prediction = np.squeeze(semantic_predictions[i])
    crop_semantic_prediction = semantic_prediction[:image_height, :image_width]

    # Save image.
    # save_annotation.save_annotation(
    #     original_image, save_dir, _IMAGE_FORMAT % idx,
    #     add_colormap=False)

    # save_annotation.save_annotation(
    #     label, save_dir,
    #     _LABEL_FORMAT % idx, add_colormap=True,
    #     colormap_type=FLAGS.colormap_type)

    # Save prediction.
    save_annotation.save_annotation(
        crop_semantic_prediction, save_dir,
        _PREDICTION_FORMAT % idx, add_colormap=True,
        colormap_type=FLAGS.colormap_type)

    if FLAGS.also_save_raw_predictions:
      image_filename = _PREDICTION_FORMAT % idx

      if train_id_to_eval_id is not None:
        crop_semantic_prediction = _convert_train_id_to_eval_id(
            crop_semantic_prediction,
            train_id_to_eval_id)
      save_annotation.save_annotation(
          crop_semantic_prediction, raw_save_dir, image_filename,
          add_colormap=False)
Example #9
def _process_batch(sess,
                   original_images,
                   semantic_predictions,
                   image_names,
                   image_heights,
                   image_widths,
                   image_id_offset,
                   save_dir,
                   raw_save_dir,
                   stacked_save_dir,
                   train_id_to_eval_id=None):
    """Evaluates one single batch qualitatively.

    Args:
      sess: TensorFlow session.
      original_images: One batch of original images.
      semantic_predictions: One batch of semantic segmentation predictions.
      image_names: Image names.
      image_heights: Image heights.
      image_widths: Image widths.
      image_id_offset: Image id offset for indexing images.
      save_dir: The directory where the predictions will be saved.
      raw_save_dir: The directory where the raw predictions will be saved.
      train_id_to_eval_id: A list mapping from train id to eval id.
    """
    (original_images, semantic_predictions, image_names, image_heights,
     image_widths) = sess.run([
         original_images, semantic_predictions, image_names, image_heights,
         image_widths
     ])

    num_image = semantic_predictions.shape[0]
    for i in range(num_image):
        image_height = np.squeeze(image_heights[i])
        image_width = np.squeeze(image_widths[i])
        original_image = np.squeeze(original_images[i])
        semantic_prediction = np.squeeze(semantic_predictions[i])
        crop_semantic_prediction = semantic_prediction[:image_height, :image_width]

        # Save image.

        # save_annotation.save_annotation(
        #     original_image, save_dir, _IMAGE_FORMAT % (image_id_offset + i),
        #     add_colormap=False)

        image_filename = os.path.splitext(os.path.basename(image_names[i]))[0]
        # print('image_filename: ', image_filename)

        stacked_path = os.path.join(stacked_save_dir, image_filename + '.jpg')
        # Scale class ids to the full 0-255 range for visualization. Note:
        # np.max() is zero for an all-background prediction, which would
        # divide by zero here.
        mask_img = (crop_semantic_prediction *
                    (255.0 / np.max(crop_semantic_prediction))).astype(np.uint8)
        mask_img = cv2.cvtColor(mask_img, cv2.COLOR_GRAY2BGR)
        stacked_img = np.concatenate((original_image, mask_img), axis=1)
        cv2.imwrite(stacked_path, stacked_img)

        if train_id_to_eval_id is not None:
            crop_semantic_prediction = _convert_train_id_to_eval_id(
                crop_semantic_prediction, train_id_to_eval_id)
        save_annotation.save_annotation(crop_semantic_prediction,
                                        raw_save_dir,
                                        image_filename,
                                        add_colormap=False)

        # Save prediction.
        if FLAGS.also_save_vis_predictions:
            save_annotation.save_annotation(
                crop_semantic_prediction,
                save_dir,
                image_filename,
                add_colormap=True,
                colormap_type=FLAGS.colormap_type)
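
Example #9 writes the original image and the mask side by side, which is why the single-channel mask is first converted to BGR: np.concatenate along axis=1 needs matching heights and channel counts. A minimal sketch:

import numpy as np
import cv2

original = np.zeros((100, 150, 3), dtype=np.uint8)
mask = np.zeros((100, 150), dtype=np.uint8)
mask_bgr = cv2.cvtColor(mask, cv2.COLOR_GRAY2BGR)       # (100, 150) -> (100, 150, 3)
stacked = np.concatenate((original, mask_bgr), axis=1)  # (100, 300, 3)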
Example #10
def _process_batch(sess,
                   original_images,
                   semantic_predictions,
                   image_names,
                   image_heights,
                   image_widths,
                   image_id_offset,
                   save_dir,
                   raw_save_dir,
                   logit_save_dir,
                   uncertainty_save_dir,
                   train_id_to_eval_id=None,
                   save_logits=False,
                   logits=None,
                   fixed_features=None,
                   extra_to_run={},
                   samples_placeholders=None,
                   samples_orig=None,
                   compute_uncertainty=False,
                   num_forward_passes=1):
    """Evaluates one single batch qualitatively.

    Args:
      sess: TensorFlow session.
      original_images: One batch of original images.
      semantic_predictions: One batch of semantic segmentation predictions.
      image_names: Image names.
      image_heights: Image heights.
      image_widths: Image widths.
      image_id_offset: Image id offset for indexing images.
      save_dir: The directory where the predictions will be saved.
      raw_save_dir: The directory where the raw predictions will be saved.
      train_id_to_eval_id: A list mapping from train id to eval id.

      Input generator and network are decoupled so that multiple passes
        through the network can be made with the same batch. Useful for MC
        dropout (computing uncertainty).
      samples_orig: run this to get a new batch from the input generator.
      samples_placeholders: set the tensor values to data in the samples_orig
                            batch. Predictions, etc. are run with
                            samples_placeholders as the input.

      To compute uncertainty by only running later layers of the network:
      fixed_features: tensor of fixed features from the network which only
        needs to be computed once.
      extra_to_run['accumulated_softmax', 'accumulated_softmax_sq']:
        accumulated softmax from fixed-feature logits. Requires
        samples_placeholders['fixed_features'] to be set.
    """

    ####
    def softmax(arr):
        # Subtract the per-row max first; mathematically equivalent but
        # avoids overflow in np.exp for large logits.
        shifted = arr - np.max(arr, axis=-1, keepdims=True)
        return np.exp(shifted) / (np.sum(np.exp(shifted), axis=-1)[..., np.newaxis])

    ####

    # Run samples_orig to get samples
    samples = sess.run([samples_orig])[0]

    # Map placeholders values to samples
    feed_dict = {
        v: samples[k]
        for k, v in samples_placeholders.items() if k in samples.keys()
    }

    # Skip processing if already processed.
    image_names_ = sess.run([image_names], feed_dict=feed_dict)[0]
    assert len(image_names_) == 1
    image_filename = os.path.basename(
        image_names_[0]).split('.png')[0].split('.jpg')[0]
    savename = os.path.join(save_dir, image_filename + ".png")
    if os.path.exists(savename) and not FLAGS.overwrite_vis:
        print("  {} exists.".format(savename))
        print(">>> SKIPPING <<<<")
        return

    # Build tensors run list
    to_run = [original_images, image_names, image_heights, image_widths]

    semantic_predictions_idx = None
    if semantic_predictions is not None:
        semantic_predictions_idx = len(to_run)
        to_run.append(semantic_predictions)
    logits_idx = None
    if logits is not None:
        logits_idx = len(to_run)
        to_run.append(logits)
    fixed_features_idx = None
    if fixed_features is not None:
        fixed_features_idx = len(to_run)
        to_run.append(fixed_features)

    run_output = sess.run(to_run, feed_dict=feed_dict)

    # Gather run outputs
    original_images = run_output[0]
    image_names = run_output[1]
    image_heights = run_output[2]
    image_widths = run_output[3]
    if semantic_predictions is not None:
        semantic_predictions = run_output[semantic_predictions_idx]
    if logits is not None:
        logits_i = run_output[logits_idx]
    if fixed_features is not None:
        fixed_features = run_output[fixed_features_idx]

    # pred mean softmax_logits is mu = sum(softmax_logits, axis=newaxis) / num_forward_passes
    # pred var semantic_pred is sigma = sqrt(1 / (num_forward_passes - 1)) * sqrt(
    #                          sum(softmax_logits**2, axis=newaxis)
    #                           - 2*mu*sum(softmax_logits, axis=newaxis)
    #                           + num_forward_passes * mu**2)
    # sum( (y - mu)^2 ) = sum(y**2) - 2*mu*sum(y) + N*mu**2
    if compute_uncertainty:

        if fixed_features is not None:
            feed_dict = {
                samples_placeholders['fixed_features']:
                fixed_features,
                samples_placeholders['image']:
                samples['image'],
                #samples_placeholders['accumulated_softmax']: np.zeros((1,1025, 2049, 19)),
                #samples_placeholders['accumulated_softmax_sq']: np.zeros((1,1025, 2049, 19)),
                #samples_placeholders['accumulated_softmax']: np.zeros((1,513, 513, 23)),
                #samples_placeholders['accumulated_softmax_sq']: np.zeros((1,513, 513, 23)),
                samples_placeholders['accumulated_softmax']:
                np.zeros(FLAGS.vis_placeholder_size),
                samples_placeholders['accumulated_softmax_sq']:
                np.zeros(FLAGS.vis_placeholder_size),
            }

            for i in tqdm.tqdm(range(num_forward_passes)):
                (accumulated_softmax,
                 accumulated_softmax_sq) = sess.run(
                     [extra_to_run['accumulated_softmax'],
                      extra_to_run['accumulated_softmax_sq']],
                     feed_dict=feed_dict)
                feed_dict[samples_placeholders[
                    'accumulated_softmax']] = accumulated_softmax
                feed_dict[samples_placeholders[
                    'accumulated_softmax_sq']] = accumulated_softmax_sq

        else:

            assert not save_logits, "Do not save logits when computing uncertainty."
            assert logits is not None, "Logits are required to compute uncertainty."

            feed_dict.update({
                #samples_placeholders['accumulated_softmax']: np.zeros((1,1025, 2049, 19)),
                #samples_placeholders['accumulated_softmax_sq']: np.zeros((1,1025, 2049, 19)),
                #samples_placeholders['accumulated_softmax']: np.zeros((1,513, 513, 23)),
                #samples_placeholders['accumulated_softmax_sq']: np.zeros((1,513, 513, 23)),
                samples_placeholders['accumulated_softmax']:
                np.zeros(FLAGS.vis_placeholder_size),
                samples_placeholders['accumulated_softmax_sq']:
                np.zeros(FLAGS.vis_placeholder_size),
            })

            # Run forward passes and compute softmax mean and variances.
            # TODO: This is numerically unstable.
            print(
                '    Accumulating {} forward passes to compute uncertainty...'.
                format(num_forward_passes))
            for i in tqdm.tqdm(range(num_forward_passes)):
                #start_time_log = time.time()
                (accumulated_softmax,
                 accumulated_softmax_sq) = sess.run(
                     [extra_to_run['accumulated_softmax'],
                      extra_to_run['accumulated_softmax_sq']],
                     feed_dict=feed_dict)
                feed_dict[samples_placeholders[
                    'accumulated_softmax']] = accumulated_softmax
                feed_dict[samples_placeholders[
                    'accumulated_softmax_sq']] = accumulated_softmax_sq

        #start_time = time.time()
        pred_mean = 1.0 * accumulated_softmax / num_forward_passes
        pred_var = (accumulated_softmax_sq
                    - 2 * pred_mean * accumulated_softmax
                    + num_forward_passes * pred_mean**2)
        # Take abs() to deal with negative values due to precision error;
        # these values should be small (i.e., ~0).
        pred_var = np.abs(pred_var)
        pred_var = np.sqrt((1.0 / (num_forward_passes - 1)) * pred_var)

        #print('        Time elapsed for computing mean and var: {}'.format(time.time() - start_time))
        #print(pred_var)

        # Overwrite semantic_predictions with mean prediction
        print('      Computing pred_mean_argmax...')
        start_time = time.time()
        pred_mean_argmax = np.argmax(pred_mean, axis=-1)
        #semantic_predictions_orig = semantic_predictions
        semantic_predictions = pred_mean_argmax
        #print(np.where(semantic_predictions != semantic_predictions_orig))
        #assert np.all(semantic_predictions == semantic_predictions_orig)
        print('        Time elapsed: {}'.format(time.time() - start_time))

        print('      Computing pred_var corresponding to pred_mean_argmax...')
        start_time = time.time()
        prediction_variances = np.zeros_like(pred_mean_argmax,
                                             dtype=pred_var.dtype)
        # For each pixel, pick the variance of the class chosen by the mean.
        for ii in range(pred_var.shape[0]):
            for jj in range(pred_var.shape[1]):
                for kk in range(pred_var.shape[2]):
                    prediction_variances[ii, jj, kk] = (
                        pred_var[ii, jj, kk, pred_mean_argmax[ii, jj, kk]])
        print('        Time elapsed: {}'.format(time.time() - start_time))
        assert prediction_variances.shape == semantic_predictions.shape
        #print(np.max(prediction_variance))
        #print(np.min(prediction_variance))

    else:
        if logits is not None:
            logits = logits_i

    ### BELOW IS UNCHANGED. ###

    num_image = semantic_predictions.shape[0]
    for i in range(num_image):

        original_image = np.squeeze(original_images[i])
        #image_height = original_image.shape[0]
        #image_width = original_image.shape[1]
        image_height = np.squeeze(image_heights[i])
        image_width = np.squeeze(image_widths[i])
        semantic_prediction = np.squeeze(semantic_predictions[i])
        crop_semantic_prediction = semantic_prediction[:image_height, :image_width]

        image_filename = os.path.basename(
            image_names[i]).split('.png')[0].split('.jpg')[0]
        if image_filename == '':
            # Save image.
            save_annotation.save_annotation(original_image,
                                            save_dir,
                                            _IMAGE_FORMAT %
                                            (image_id_offset + i),
                                            add_colormap=False)

            # Save prediction.
            save_annotation.save_annotation(crop_semantic_prediction,
                                            save_dir,
                                            _PREDICTION_FORMAT %
                                            (image_id_offset + i),
                                            add_colormap=True,
                                            colormap_type=FLAGS.colormap_type)
        else:
            # Save image.
            save_annotation.save_annotation(original_image,
                                            save_dir,
                                            image_filename,
                                            add_colormap=False)

            # Save prediction.
            save_annotation.save_annotation(crop_semantic_prediction,
                                            save_dir,
                                            image_filename + "_vis",
                                            add_colormap=True,
                                            colormap_type=FLAGS.colormap_type)

        if FLAGS.also_save_raw_predictions:
            #image_filename = os.path.basename(image_names[i])
            image_filename = os.path.basename(
                image_names[i]).split('.png')[0].split('.jpg')[0]

            if train_id_to_eval_id is not None:
                crop_semantic_prediction = _convert_train_id_to_eval_id(
                    crop_semantic_prediction, train_id_to_eval_id)
            save_annotation.save_annotation(crop_semantic_prediction,
                                            raw_save_dir,
                                            image_filename,
                                            add_colormap=False)

        if FLAGS.save_logits:
            assert logits is not None
            image_filename = os.path.basename(
                image_names[i]).split('.png')[0].split('.jpg')[0]
            #print(type(logits))
            #print(logits.shape)
            #logits = np.array(logits)

            # np.save writes binary data, so open the file in binary mode.
            with tf.gfile.Open('%s/%s.npy' % (logit_save_dir, image_filename),
                               mode='wb') as f:
                np.save(f, logits)

        if FLAGS.compute_uncertainty:
            prediction_variances = prediction_variances.astype(np.float16)
            image_filename = os.path.basename(
                image_names[i]).split('.png')[0].split('.jpg')[0]

            prediction_variance = np.squeeze(prediction_variances[i])
            crop_prediction_variance = prediction_variance[:image_height, :image_width]

            with tf.gfile.Open('%s/%s.npy' %
                               (uncertainty_save_dir, image_filename),
                               mode='wb') as f:
                np.save(f, crop_prediction_variance)
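
The uncertainty path in Example #10 works because variance can be recovered from running sums: sum_i (y_i - mu)^2 = sum_i y_i^2 - 2*mu*sum_i y_i + N*mu^2, so only the accumulated softmax and its accumulated square must be kept across forward passes. A quick numerical check of that identity (illustrative data):

import numpy as np

y = np.random.rand(8)          # N = 8 forward passes of a single logit
mu = y.mean()
lhs = np.sum((y - mu) ** 2)
rhs = np.sum(y ** 2) - 2 * mu * np.sum(y) + y.size * mu ** 2
assert np.isclose(lhs, rhs)
# The snippet then reports sqrt(lhs / (N - 1)) per pixel; as its TODO notes,
# this one-pass form can lose precision for large activations.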
Example #11
def _process_batch(sess, original_images, semantic_predictions, gt_labels, image_names,
                   image_heights, image_widths, image_id_offset, update_op, iou, save_dir, save_dir1,
                   raw_save_dir, raw_save_dir_label, utf, mtf, train_id_to_eval_id=None):
  """Evaluates one single batch qualitatively.

  Args:
    sess: TensorFlow session.
    original_images: One batch of original images.
    semantic_predictions: One batch of semantic segmentation predictions.
    image_names: Image names.
    image_heights: Image heights.
    image_widths: Image widths.
    image_id_offset: Image id offset for indexing images.
    save_dir: The directory where the predictions will be saved.
    raw_save_dir: The directory where the raw predictions will be saved.
    train_id_to_eval_id: A list mapping from train id to eval id.
  """
  (original_images,
   semantic_predictions,
   labels,
   image_names,
   image_heights,
   image_widths,
   confusion_matrix) = sess.run([original_images, semantic_predictions,
                                 gt_labels, image_names, image_heights,
                                 image_widths, update_op])
  mean_iou = sess.run(iou)
  # print("iou tf: ", m1)
  # print("confusion matrix tf: ", u1)

  # print("shape of labels: ", labels.shape)
  # import pdb; pdb.set_trace()
  num_image = semantic_predictions.shape[0]
  for i in range(num_image):
    image_height = np.squeeze(image_heights[i])
    image_width = np.squeeze(image_widths[i])
    original_image = np.squeeze(original_images[i])
    semantic_prediction = np.squeeze(semantic_predictions[i])
    label = np.squeeze(labels[i])

    crop_semantic_prediction = semantic_prediction[:image_height, :image_width]
    crop_gt_label = label[:image_height, :image_width]

    # Save image.
    save_annotation.save_annotation(
        original_image, save_dir, _IMAGE_FORMAT % (image_id_offset + i),
        add_colormap=False)

    # Save prediction.
    save_annotation.save_annotation(
        crop_semantic_prediction, save_dir,
        _PREDICTION_FORMAT % (image_id_offset + i), add_colormap=True,
        colormap_type=FLAGS.colormap_type)
    
    save_annotation.save_annotation(
        crop_semantic_prediction, save_dir1,
        _PREDICTION_FORMAT % (image_id_offset + i), add_colormap=True,
        colormap_type=FLAGS.colormap_type)
  
    # Save ground truth.
    save_annotation.save_annotation(
        crop_gt_label, save_dir,
        _GT_FORMAT % (image_id_offset + i), add_colormap=True,
        colormap_type=FLAGS.colormap_type)
  

    # label[label==255]=0
    # save_annotation.save_annotation(
    #     label, save_dir,
    #     _GT_FORMAT % (image_id_offset + 100), add_colormap=True,
    #     colormap_type=FLAGS.colormap_type)

    if FLAGS.also_save_raw_predictions:
      image_filename = os.path.basename(image_names[i])

      if train_id_to_eval_id is not None:
        crop_semantic_prediction = _convert_train_id_to_eval_id(
            crop_semantic_prediction,
            train_id_to_eval_id)
      save_annotation.save_annotation(
          crop_semantic_prediction, raw_save_dir, image_filename,
          add_colormap=False, flag='Y', save_dir_label=raw_save_dir_label)

    # print("Mean IoU per class: ", mean_iou)
    miou_class1_back.append(mean_iou[0])
    miou_class2_stem.append(mean_iou[1])
    miou_class3_cal.append(mean_iou[2])
    miou_class4_shoot.append(mean_iou[3])
    miou_img = np.nanmean(mean_iou)
    mean_iou_eval.append(miou_img)
    print("Mean iou: ", miou_img)
    print("Confusion Matrix: ", confusion_matrix)
Example #12
def _process_batch(sess, original_images, semantic_predictions, raw_predictions, logits, exp, image_names,
                   image_heights, image_widths, image_id_offset, save_dir,
                   raw_save_dir, train_id_to_eval_id=None):
  """Evaluates one single batch qualitatively.

  Args:
    sess: TensorFlow session.
    original_images: One batch of original images.
    semantic_predictions: One batch of semantic segmentation predictions.
    raw_predictions: One batch of raw predictions (unused here).
    logits: One batch of logits.
    exp: Experiment identifier used in output paths.
    image_names: Image names.
    image_heights: Image heights.
    image_widths: Image widths.
    image_id_offset: Image id offset for indexing images.
    save_dir: The directory where the predictions will be saved.
    raw_save_dir: The directory where the raw predictions will be saved.
    train_id_to_eval_id: A list mapping from train id to eval id.
  """
  (original_images,
   semantic_predictions,
   logits,
   image_names,
   image_heights,
   image_widths) = sess.run([original_images, semantic_predictions, logits,
                             image_names, image_heights, image_widths])

  global to_output_logits_sihl
  global to_output_logits_stmoritz
  global to_output_logits_sils
  global to_output_logits_silvaplana
  
  num_image = semantic_predictions.shape[0]
  logits = logits[common.OUTPUT_TYPE]
  for i in range(num_image):
    image_height = np.squeeze(image_heights[i])
    image_width = np.squeeze(image_widths[i])
    original_image = np.squeeze(original_images[i])
    semantic_prediction = np.squeeze(semantic_predictions[i])
    crop_semantic_prediction = semantic_prediction[:image_height, :image_width]
    # Use a separate name here: reassigning `logits` would break indexing on
    # later iterations of the loop.
    logits_i = np.squeeze(logits[i])
    crop_logits = logits_i[:image_height, :image_width]

    #plt.imsave(save_dir+"/{}_prob.png".format(image_names[i]), _crop_logits, cmap="seismic")

    #logits_0 = crop_logits[:,:,0]
    #logits_1 = crop_logits[:,:,1]
    #logits_2 = crop_logits[:,:,2]

    #logits_0 = logits_0[(labels == 1) | (labels == 2)]
    #logits_1 = logits_1[(labels == 1) | (labels == 2)]
    #logits_2 = logits_2[(labels == 1) | (labels == 2)]

    #labels = labels[(labels == 1) | (labels == 2)]

    #n_logits_0, n_logits_1, n_logits_2 = softmax([logits_0, logits_1, logits_2])
    #plt.imsave(save_dir+"/{}_prob0.png".format(image_names[i]),1-logits_0, cmap="seismic")
    #plt.imsave(save_dir+"/{}_prob1.png".format(image_names[i]),logits_1, cmap="seismic")
    #plt.imsave(save_dir+"/{}_prob2.png".format(image_names[i]),logits_2, cmap="seismic")
    if "sils" in image_names[i]:
        to_output_logits_sils.append([crop_logits, crop_semantic_prediction, image_names[i]])
    elif "stmoritz" in image_names[i]:
        to_output_logits_stmoritz.append([crop_logits, crop_semantic_prediction, image_names[i]])
    elif "silvaplana" in image_names[i]:
        to_output_logits_silvaplana.append([crop_logits, crop_semantic_prediction, image_names[i]])
    elif "sihl" in image_names[i]:
        to_output_logits_sihl.append([crop_logits, crop_semantic_prediction, image_names[i]])
    #np.save("deeplab/datasets/sar_ice/exp_{}/logits/{}".format(exp,image_names[i]), crop_logits)
        
    #  np.save("deeplab/pr_curve/logits_sils", crop_logits)
    #if "stmoritz" in image_names[i]:
    #  np.save("deeplab/pr_curve/logits_stmoritz", crop_logits)
    #if "silvaplana" in image_names[i]:
    #  np.save("deeplab/pr_curve/logits_silvplana", crop_logits)


    # Save image.
    save_annotation.save_annotation(
        original_image, save_dir, _IMAGE_FORMAT % (image_id_offset + i),
        add_colormap=False)

    # Save prediction.
    save_annotation.save_annotation(
        crop_semantic_prediction, save_dir,
        image_names[i], add_colormap=True,
        colormap_type=FLAGS.colormap_type)
#        _PREDICTION_FORMAT % (image_id_offset + i), add_colormap=True,
#        colormap_type=FLAGS.colormap_type)

    if FLAGS.also_save_raw_predictions:
      image_filename = os.path.basename(image_names[i])

      if train_id_to_eval_id is not None:
        crop_semantic_prediction = _convert_train_id_to_eval_id(
            crop_semantic_prediction,
            train_id_to_eval_id)
      save_annotation.save_annotation(
          crop_semantic_prediction, raw_save_dir, image_filename,
          add_colormap=False)
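
Example #12 stores raw logits for later per-lake analysis (the commented-out code hints at PR curves). Turning those logits into class probabilities requires a softmax; a numerically stable sketch:

import numpy as np

def stable_softmax(logits):
    # Subtracting the per-pixel max keeps np.exp from overflowing.
    shifted = logits - logits.max(axis=-1, keepdims=True)
    exp = np.exp(shifted)
    return exp / exp.sum(axis=-1, keepdims=True)

probs = stable_softmax(np.random.randn(4, 4, 3))      # H x W x num_classes
assert np.allclose(probs.sum(axis=-1), 1.0)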
Example #13
def _process_batch(sess,
                   slide_mask,
                   original_images,
                   semantic_predictions,
                   image_names,
                   mask_size,
                   downsample,
                   image_heights,
                   image_widths,
                   image_id_offset,
                   raw_save_dir,
                   train_id_to_eval_id=None):
    """Evaluates one single batch qualitatively.

  Args:
    sess: TensorFlow session.
    original_images: One batch of original images.
    semantic_predictions: One batch of semantic segmentation predictions.
    image_names: Image names.
    mask_size: [x, y] dimensions of the mask.
    image_heights: Image heights.
    image_widths: Image widths.
    image_id_offset: Image id offset for indexing images.
    raw_save_dir: The directory where the raw predictions will be saved.
    train_id_to_eval_id: A list mapping from train id to eval id.
  """
    (original_images, semantic_predictions, image_names, image_heights,
     image_widths) = sess.run([
         original_images, semantic_predictions, image_names, image_heights,
         image_widths
     ])

    num_image = semantic_predictions.shape[0]
    for i in range(num_image):
        image_height = np.squeeze(image_heights[i])
        image_width = np.squeeze(image_widths[i])
        original_image = np.squeeze(original_images[i])
        semantic_prediction = np.squeeze(semantic_predictions[i])
        crop_semantic_prediction = semantic_prediction[:image_height, :image_width]
        image_filename = image_names[i].decode()

        # populate wsi mask
        Ystart = float(image_filename.split('-')[-2])
        Ystart /= downsample
        Ystart = int(round(Ystart))

        Xstart = float(image_filename.split('-')[-3])
        Xstart /= downsample
        Xstart = int(round(Xstart))

        Xstop = min(Xstart + image_width, mask_size[0])
        Ystop = min(Ystart + image_height, mask_size[1])

        slide_mask[Ystart:Ystop, Xstart:Xstop] = np.maximum(
            slide_mask[Ystart:Ystop, Xstart:Xstop],
            semantic_prediction[:Ystop - Ystart, :Xstop - Xstart])

        if FLAGS.also_save_raw_predictions:
            # # Save image.
            # save_annotation.save_annotation(
            #     original_image, raw_save_dir, _IMAGE_FORMAT % (image_id_offset + i),
            #     add_colormap=False)
            #
            # # Save prediction.
            # save_annotation.save_annotation(
            #     crop_semantic_prediction, raw_save_dir,
            #     _PREDICTION_FORMAT % (image_id_offset + i), add_colormap=True,
            #     colormap_type=FLAGS.colormap_type)
            if train_id_to_eval_id is not None:
                crop_semantic_prediction = _convert_train_id_to_eval_id(
                    crop_semantic_prediction, train_id_to_eval_id)
            save_annotation.save_annotation(crop_semantic_prediction,
                                            raw_save_dir,
                                            image_filename,
                                            add_colormap=False)

    return slide_mask
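
Example #13 stitches tile predictions back into a whole-slide mask, reading each tile's origin out of its filename and resolving overlaps with np.maximum. The filename pattern below is an assumption inferred from the split('-') calls:

filename = 'slide01-4096-2048-tile.png'   # assumed ...-X-Y-... convention
downsample = 4.0
x_start = int(round(float(filename.split('-')[-3]) / downsample))
y_start = int(round(float(filename.split('-')[-2]) / downsample))
print(x_start, y_start)                   # -> 1024, 512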
Example #14
model_ts = int(sys.argv[1])
model_name = 'model-pc-%d' % model_ts

root_dir = join('E:', 'lerner', 'deeplab', 'cache_data', model_name, 'vis')
src_dir = join(root_dir, 'segmentation_results')
dst_dir = join(root_dir, 'segmentation_results_masked')
gt_mask_dir = join('D:', 'datasets', 'processed', 'pascalcontext', 'truth-',
                   'val')

if not os.path.exists(dst_dir):
    os.mkdir(dst_dir)

for f in os.listdir(src_dir):
    if 'image' in f:
        os.rename(join(src_dir, f), join(dst_dir, f))
        continue

    idx = 1 + int(f[:f.index('_')])
    pred = np.array(Image.open(join(src_dir, f)).convert('L'))

    with h5py.File(join(gt_mask_dir, 'val_%06d_pixeltruth.mat' % idx)) as mat:
        gt = mat['truth_img'][:].T

    pred[gt == 0] = 0

    #im = Image.fromarray(pred).convert('RGB')
    #im.save(join(dst_dir, f))

    save_annotation.save_annotation(pred, dst_dir, f)
Example #15
def _process_batch(sess,
                   original_images,
                   semantic_predictions,
                   instance_predictions,
                   regression_predictions,
                   panoptic_prediction,
                   image_names,
                   image_heights,
                   image_widths,
                   image_id_offset,
                   save_dir,
                   instance_save_dir,
                   regression_save_dir,
                   panoptic_save_dir,
                   raw_save_dir,
                   train_id_to_eval_id=None):
    """Evaluates one single batch qualitatively.

  Args:
    sess: TensorFlow session.
    original_images: One batch of original images.
    semantic_predictions: One batch of semantic segmentation predictions.
    instance_predictions: One batch of instance predictions.
    image_names: Image names.
    image_heights: Image heights.
    image_widths: Image widths.
    image_id_offset: Image id offset for indexing images.
    save_dir: The directory where the predictions will be saved.
    instance_save_dir : The directory where the instance predictions will be saved.
    raw_save_dir: The directory where the raw predictions will be saved.
    train_id_to_eval_id: A list mapping from train id to eval id.
  """
    (original_images, semantic_predictions, instance_predictions,
     regression_predictions, instance_segmentation, image_names, image_heights,
     image_widths) = sess.run([
         original_images, semantic_predictions, instance_predictions,
         regression_predictions, panoptic_prediction, image_names,
         image_heights, image_widths
     ])

    num_image = semantic_predictions.shape[0]
    for i in range(num_image):
        image_height = np.squeeze(image_heights[i])
        image_width = np.squeeze(image_widths[i])
        original_image = np.squeeze(original_images[i])
        semantic_prediction = np.squeeze(semantic_predictions[i])
        instance_predictions = np.squeeze(instance_predictions[i])
        regression_predictions = np.squeeze(regression_predictions[i])

        crop_semantic_prediction = semantic_prediction[:image_height, :image_width]
        crop_instance_prediction = instance_predictions[:image_height, :image_width]
        crop_regression_prediction = regression_predictions[:image_height, :image_width, :]

        instance_segmentation = np.squeeze(instance_segmentation)
        unique_elements = np.unique(instance_segmentation)

        instance_segmentation_scaled = np.array(instance_segmentation) * (
            255 // len(unique_elements))

        ##########  VIS INSTANCE SEG OUTPUT ##################

        inst_color = cv2.applyColorMap(
            instance_segmentation_scaled.astype('uint8'), cv2.COLORMAP_JET)

        instance_segmentation_coloured = Image.blend(
            Image.fromarray(original_image), Image.fromarray(inst_color), 0.4)

        ######################################################
        # For creating boundaries around instances:
        # add a boundary ring to the image.
        colormap_type = FLAGS.colormap_type
        instance_boundry = np.zeros_like(semantic_prediction)
        instances = np.delete(unique_elements, 0)

        # Use a distinct loop variable: reusing `i` would clobber the batch
        # index still needed by the save calls below.
        for index, instance_id in enumerate(instances):
            local_instance_mask = instance_segmentation == instance_id
            kernel = np.ones((5, 5), np.uint8)
            dilation = cv2.dilate(local_instance_mask.astype('uint8'),
                                  kernel,
                                  iterations=1)
            erosion = cv2.erode(local_instance_mask.astype('uint8'),
                                kernel,
                                iterations=1)
            boundry = (dilation - erosion) * 255
            instance_boundry += boundry

        colored_label = get_dataset_colormap.label_to_color_image(
            semantic_prediction.astype('uint8'), colormap_type)
        colored_label = colored_label + np.dstack(
            (instance_boundry, instance_boundry, instance_boundry))
        colored_label = Image.fromarray(colored_label.astype(dtype=np.uint8))

        panoptic_output = Image.blend(Image.fromarray(original_image),
                                      colored_label, 0.7)

        ######################################################################################

        # Save image.
        save_annotation.save_annotation(original_image,
                                        save_dir,
                                        _IMAGE_FORMAT % (image_id_offset + i),
                                        add_colormap=False)

        # Save instance heatmap prediction.
        save_annotation.save_annotation(crop_instance_prediction,
                                        instance_save_dir,
                                        _PREDICTION_FORMAT %
                                        (image_id_offset + i),
                                        scale_values=True,
                                        add_colormap=False,
                                        colormap_type=FLAGS.colormap_type)

        # Save regression prediction.
        save_annotation.save_annotation_instance_regression(
            instance_segmentation_coloured,
            regression_save_dir,
            _PREDICTION_FORMAT % (image_id_offset + i),
            normalize_values=True,
            add_colormap=False,
            colormap_type=FLAGS.colormap_type)

        # Save prediction.
        save_annotation.save_annotation(crop_semantic_prediction,
                                        save_dir,
                                        _PREDICTION_FORMAT %
                                        (image_id_offset + i),
                                        add_colormap=True,
                                        colormap_type=FLAGS.colormap_type)

        # Save panoptic prediction.
        save_annotation.save_annotation_panoptic(
            panoptic_output,
            panoptic_save_dir,
            _PREDICTION_FORMAT % (image_id_offset + i),
            add_colormap=False,
            colormap_type=FLAGS.colormap_type)

        if FLAGS.also_save_raw_predictions:
            image_filename = os.path.basename(image_names[i])

            if train_id_to_eval_id is not None:
                crop_semantic_prediction = _convert_train_id_to_eval_id(
                    crop_semantic_prediction, train_id_to_eval_id)
            save_annotation.save_annotation(crop_semantic_prediction,
                                            raw_save_dir,
                                            image_filename,
                                            add_colormap=False)
Example #16
def main(unused_argv):
    tf.logging.set_verbosity(tf.logging.INFO)

    tf.gfile.MakeDirs(FLAGS.inference_dir)

    elapsed_time = 0

    g = load_graph()
    with g.as_default(), tf.device("/cpu:0"):

        if FLAGS.print_flops:
            opts = tf.profiler.ProfileOptionBuilder.float_operation()
            flops = tf.profiler.profile(g, options=opts)
            if flops is not None:
                print('Total flops: {}'.format(flops.total_float_ops))

        image_name = os.path.basename(FLAGS.image_path)
        image_name, image_extension = image_name.rsplit('.', 1)

        supported_extensions = ['png', 'jpeg', 'jpg']

        if not any(image_extension == extension
                   for extension in supported_extensions):
            raise ValueError('Image extension "{}" not supported...'.format(
                image_extension))

        image = cv2.imread(FLAGS.image_path)
        image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
        original_image_dimensions = image.shape[0:2]
        image = cv2.resize(image, tuple(reversed(FLAGS.inference_crop_size)))
        image = np.expand_dims(image, 0)

        input_operation = g.get_operation_by_name('import/' + _INPUT_OP)
        output_operation = g.get_operation_by_name('import/' + _OUTPUT_OP)

        with tf.Session(graph=g) as sess:

            semantic_predictions = None
            if FLAGS.avg_inf_time:
                for i in range(20):
                    start_time = timeit.default_timer()
                    semantic_predictions = sess.run(
                        output_operation.outputs[0],
                        feed_dict={input_operation.outputs[0]: image})

                    elapsed_time += timeit.default_timer() - start_time

                elapsed_time = np.round(elapsed_time / 20, 4)
            else:
                start_time = timeit.default_timer()
                semantic_predictions = sess.run(
                    output_operation.outputs[0],
                    feed_dict={input_operation.outputs[0]: image})

                elapsed_time = timeit.default_timer() - start_time

        print('Inference time: {} s'.format(elapsed_time))

        result = np.array(semantic_predictions, dtype=np.uint8)
        result = np.squeeze(result)
        result = cv2.resize(result,
                            tuple(reversed(original_image_dimensions)),
                            interpolation=cv2.INTER_NEAREST)  # keep label ids intact

        # save raw result...
        save_annotation.save_annotation(result,
                                        FLAGS.inference_dir,
                                        _RAW_FORMAT % image_name,
                                        add_colormap=False)

        # save result as color image...
        save_annotation.save_annotation(result,
                                        FLAGS.inference_dir,
                                        _PREDICTION_FORMAT % image_name,
                                        add_colormap=True,
                                        colormap_type=FLAGS.dataset)

    return elapsed_time
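
# load_graph() is referenced above but not defined in this example. A frozen
# TF 1.x graph is commonly loaded with a helper like the sketch below; the
# flag name FLAGS.frozen_graph_path is a placeholder assumption:
def load_graph():
    graph = tf.Graph()
    graph_def = tf.GraphDef()
    with tf.gfile.GFile(FLAGS.frozen_graph_path, 'rb') as f:
        graph_def.ParseFromString(f.read())
    with graph.as_default():
        # tf.import_graph_def prefixes nodes with 'import/' by default, which
        # is why the ops above are looked up as 'import/<op_name>'.
        tf.import_graph_def(graph_def)
    return graph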
Example #17
def _process_batch(sess,
                   original_images,
                   semantic_predictions,
                   image_names,
                   image_heights,
                   image_widths,
                   image_id_offset,
                   save_dir,
                   raw_save_dir,
                   train_id_to_eval_id=None):
    """Evaluates one single batch qualitatively.

    Args:
      sess: TensorFlow session.
      original_images: One batch of original images.
      semantic_predictions: One batch of semantic segmentation predictions.
      image_names: Image names.
      image_heights: Image heights.
      image_widths: Image widths.
      image_id_offset: Image id offset for indexing images.
      save_dir: The directory where the predictions will be saved.
      raw_save_dir: The directory where the raw predictions will be saved.
      train_id_to_eval_id: A list mapping from train id to eval id.
    """
    # Note: this overrides the raw_save_dir argument (sketch after this example).
    img_save_dir, color_save_dir, raw_save_dir = my_func()

    (original_images, semantic_predictions, image_names, image_heights,
     image_widths) = sess.run([
         original_images, semantic_predictions, image_names, image_heights,
         image_widths
     ])

    num_image = semantic_predictions.shape[0]

    for i in range(num_image):
        image_height = np.squeeze(image_heights[i])
        image_width = np.squeeze(image_widths[i])
        original_image = np.squeeze(original_images[i])
        semantic_prediction = np.squeeze(semantic_predictions[i])
        crop_semantic_prediction = semantic_prediction[:image_height, :image_width]

        image_filename = os.path.basename(image_names[i]).decode("utf-8")

        # Save image.
        save_annotation.save_annotation(original_image,
                                        img_save_dir,
                                        image_filename,
                                        add_colormap=False)

        # Save prediction.
        save_annotation.save_annotation(crop_semantic_prediction,
                                        color_save_dir,
                                        image_filename,
                                        add_colormap=True,
                                        colormap_type=FLAGS.colormap_type)

        if FLAGS.also_save_raw_predictions:

            if train_id_to_eval_id is not None:
                crop_semantic_prediction = _convert_train_id_to_eval_id(
                    crop_semantic_prediction, train_id_to_eval_id)
            save_annotation.save_annotation(crop_semantic_prediction,
                                            raw_save_dir,
                                            image_filename,
                                            add_colormap=False)
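
# my_func() is not shown in this example. Judging from how its return values
# are used, it creates and returns the three output directories; a minimal
# sketch, where FLAGS.vis_logdir and the subdirectory names are assumptions:
def my_func():
    img_save_dir = os.path.join(FLAGS.vis_logdir, 'images')
    color_save_dir = os.path.join(FLAGS.vis_logdir, 'colored_predictions')
    raw_save_dir = os.path.join(FLAGS.vis_logdir, 'raw_predictions')
    for directory in (img_save_dir, color_save_dir, raw_save_dir):
        tf.gfile.MakeDirs(directory)
    return img_save_dir, color_save_dir, raw_save_dir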
Example #18
def _process_batch(sess,
                   original_images,
                   semantic_predictions,
                   instance_predictions,
                   regression_predictions,
                   image_names,
                   image_heights,
                   image_widths,
                   image_id_offset,
                   save_dir,
                   instance_save_dir,
                   regression_save_dir,
                   panoptic_save_dir,
                   raw_save_dir,
                   train_id_to_eval_id=None):
    """Evaluates one single batch qualitatively.

  Args:
    sess: TensorFlow session.
    original_images: One batch of original images.
    semantic_predictions: One batch of semantic segmentation predictions.
    instance_predictions: One batch of instance predictions.
    regression_predictions: One batch of instance-center regression predictions.
    image_names: Image names.
    image_heights: Image heights.
    image_widths: Image widths.
    image_id_offset: Image id offset for indexing images.
    save_dir: The directory where the predictions will be saved.
    instance_save_dir: The directory where the instance predictions will be saved.
    regression_save_dir: The directory where the regression predictions will be saved.
    panoptic_save_dir: The directory where the panoptic predictions will be saved.
    raw_save_dir: The directory where the raw predictions will be saved.
    train_id_to_eval_id: A list mapping from train id to eval id.
  """
    (original_images, semantic_predictions, instance_predictions,
     regression_predictions, image_names, image_heights,
     image_widths) = sess.run([
         original_images, semantic_predictions, instance_predictions,
         regression_predictions, image_names, image_heights, image_widths
     ])

    num_image = semantic_predictions.shape[0]
    for i in range(num_image):
        image_height = np.squeeze(image_heights[i])
        image_width = np.squeeze(image_widths[i])
        original_image = np.squeeze(original_images[i])
        semantic_prediction = np.squeeze(semantic_predictions[i])
        # Use singular names so the batch arrays are not overwritten in the loop.
        instance_prediction = np.squeeze(instance_predictions[i])
        regression_prediction = np.squeeze(regression_predictions[i])

        crop_semantic_prediction = semantic_prediction[:image_height, :image_width]
        crop_instance_prediction = instance_prediction[:image_height, :image_width]
        crop_regression_prediction = regression_prediction[:image_height, :image_width, :]
        # The 2-channel (x, y) regression output could instead be padded to RGB
        # here before saving; see the corrected sketch after this example.
        # if crop_regression_prediction.ndim == 3 and crop_regression_prediction.shape[2] == 2:
        #     red, green = np.dsplit(crop_regression_prediction, 2)
        #     blue = np.zeros_like(green)
        #     crop_regression_prediction = np.concatenate((red, green, blue), axis=2)
        # else:
        #     raise ValueError('Input label y offset shape must be [height, width, 2].')

        panoptic_prediction = process_panoptic_output(
            original_image,
            crop_semantic_prediction,
            crop_instance_prediction,
            crop_regression_prediction,
            colormap_type=FLAGS.colormap_type)

        # Save image.
        save_annotation.save_annotation(original_image,
                                        save_dir,
                                        _IMAGE_FORMAT % (image_id_offset + i),
                                        add_colormap=False)

        # Save instance heatmap prediction.
        save_annotation.save_annotation(crop_instance_prediction,
                                        instance_save_dir,
                                        _PREDICTION_FORMAT %
                                        (image_id_offset + i),
                                        scale_values=True,
                                        add_colormap=False,
                                        colormap_type=FLAGS.colormap_type)

        # Save regression prediction.
        save_annotation.save_annotation_instance_regression(
            crop_regression_prediction,
            regression_save_dir,
            _PREDICTION_FORMAT % (image_id_offset + i),
            normalize_values=True,
            add_colormap=False,
            colormap_type=FLAGS.colormap_type)

        # Save prediction.
        save_annotation.save_annotation(crop_semantic_prediction,
                                        save_dir,
                                        _PREDICTION_FORMAT %
                                        (image_id_offset + i),
                                        add_colormap=True,
                                        colormap_type=FLAGS.colormap_type)

        # Save panoptic prediction.
        save_annotation.save_annotation_panoptic(
            panoptic_prediction,
            panoptic_save_dir,
            _PREDICTION_FORMAT % (image_id_offset + i),
            add_colormap=False,
            colormap_type=FLAGS.colormap_type)

        if FLAGS.also_save_raw_predictions:
            image_filename = os.path.basename(image_names[i])

            if train_id_to_eval_id is not None:
                crop_semantic_prediction = _convert_train_id_to_eval_id(
                    crop_semantic_prediction, train_id_to_eval_id)
            save_annotation.save_annotation(crop_semantic_prediction,
                                            raw_save_dir,
                                            image_filename,
                                            add_colormap=False)
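
# The commented-out block above shows how a 2-channel (x, y) offset map can be
# padded to RGB for visualization. A corrected, self-contained sketch (the
# original draft split into 3 parts, which fails on a 2-channel input):
def _regression_to_rgb(offsets):
    """Maps a [height, width, 2] offset field to RGB with a zero blue channel."""
    if offsets.ndim != 3 or offsets.shape[2] != 2:
        raise ValueError('Input offset shape must be [height, width, 2].')
    red, green = np.dsplit(offsets, 2)
    blue = np.zeros_like(green)
    return np.concatenate((red, green, blue), axis=2)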
Example #19
def _process_batch(sess,
                   original_images,
                   semantic_predictions,
                   image_names,
                   image_heights,
                   image_widths,
                   image_id_offset,
                   save_dir,
                   raw_save_dir,
                   train_id_to_eval_id=None,
                   text_file=None):
    """Evaluates one single batch qualitatively.

  Args:
    sess: TensorFlow session.
    original_images: One batch of original images.
    semantic_predictions: One batch of semantic segmentation predictions.
    image_names: Image names.
    image_heights: Image heights.
    image_widths: Image widths.
    image_id_offset: Image id offset for indexing images.
    save_dir: The directory where the predictions will be saved.
    raw_save_dir: The directory where the raw predictions will be saved.
    train_id_to_eval_id: A list mapping from train id to eval id.
    text_file: Open file handle used to log per-batch inference times.
  """
    # run_options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
    # run_metadata = tf.RunMetadata()
    t = TicToc()
    t.tic()
    (original_images, semantic_predictions, image_names, image_heights,
     image_widths) = sess.run([
         original_images, semantic_predictions, image_names, image_heights,
         image_widths
     ])  # , options=run_options, run_metadata=run_metadata)

    # tl = timeline.Timeline(run_metadata.step_stats)
    # ctf = tl.generate_chrome_trace_format()
    # with open('/home/zhaopeng/Muyan/custom/vis_dir/segmentation_resultsmobilenet_v2/timeline'+str(batch_num)+'.json','w') as f:
    #     f.write(ctf)

    tElapse = t.tocvalue()
    text_file.write("Testing image %s\n" % image_names[0])
    text_file.write("  %f\n" % tElapse)

    num_image = semantic_predictions.shape[0]
    for i in range(num_image):
        image_height = np.squeeze(image_heights[i])
        image_width = np.squeeze(image_widths[i])
        # original_image = np.squeeze(original_images[i])
        semantic_prediction = np.squeeze(semantic_predictions[i])
        crop_semantic_prediction = semantic_prediction[:image_height, :image_width]

        image_filename = os.path.basename(image_names[i])
        image_name = image_filename.decode("utf-8")
        # Save image.
        # save_annotation.save_annotation(
        #     original_image, save_dir, _IMAGE_FORMAT % (image_id_offset + i),
        #     add_colormap=False)

        # Save prediction.
        save_annotation.save_annotation(
            crop_semantic_prediction,
            save_dir,
            os.path.splitext(image_name)[0],  # strip extension; was _PREDICTION_FORMAT % (image_id_offset + i)
            add_colormap=True,
            colormap_type=FLAGS.colormap_type)

        if FLAGS.also_save_raw_predictions:
            if train_id_to_eval_id is not None:
                crop_semantic_prediction = _convert_train_id_to_eval_id(
                    crop_semantic_prediction, train_id_to_eval_id)
            save_annotation.save_annotation(crop_semantic_prediction,
                                            raw_save_dir,
                                            image_filename,
                                            add_colormap=False)
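
# TicToc above comes from an external timing helper (pytictoc exposes this
# tic()/tocvalue() interface). A minimal drop-in sketch using only the
# standard library:
import timeit

class TicToc(object):
    def tic(self):
        self._start = timeit.default_timer()

    def tocvalue(self):
        return timeit.default_timer() - self._start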
Example #20
def _process_batch(sess, original_images, labels, mybatch_num, semantic_predictions, image_names, #pang-add
                   image_heights, image_widths, image_id_offset, save_dir,
                   raw_save_dir, train_id_to_eval_id=None):
  """Evaluates one single batch qualitatively.

  Args:
    sess: TensorFlow session.
    original_images: One batch of original images.
    labels: One batch of ground-truth label maps. #pang-add
    mybatch_num: Batch index used to name the saved label/prediction pairs. #pang-add
    semantic_predictions: One batch of semantic segmentation predictions.
    image_names: Image names.
    image_heights: Image heights.
    image_widths: Image widths.
    image_id_offset: Image id offset for indexing images.
    save_dir: The directory where the predictions will be saved.
    raw_save_dir: The directory where the raw predictions will be saved.
    train_id_to_eval_id: A list mapping from train id to eval id.
  """
  '''
  (original_images,
   semantic_predictions,
   image_names,
   image_heights,
   image_widths) = sess.run([original_images, semantic_predictions,
                             image_names, image_heights, image_widths])
  '''
  (original_images,
   labels, #pang-add
   semantic_predictions,
   image_names,
   image_heights,
   image_widths) = sess.run([original_images, labels, semantic_predictions,
                             image_names, image_heights, image_widths])

  num_image = semantic_predictions.shape[0]
  
  for i in range(num_image):
    image_height = np.squeeze(image_heights[i])
    image_width = np.squeeze(image_widths[i])
    original_image = np.squeeze(original_images[i])
    label = np.squeeze(labels[i]) #pang-add
    semantic_prediction = np.squeeze(semantic_predictions[i])
    crop_semantic_prediction = semantic_prediction[:image_height, :image_width]


    # Save the raw prediction/label pair for later comparison. #pang-add
    pang_prediction = crop_semantic_prediction
    pang_label = label
    my_str_path_label = '/home/pzn/dataset/result_pre_label_new/' + str(mybatch_num) + '_my_label.png'
    my_str_path_predict = '/home/pzn/dataset/result_pre_label_new/' + str(mybatch_num) + '_my_predict.png'
    im_pang_prediction = Image.fromarray(pang_prediction.astype(dtype=np.uint8))
    im_pang_prediction.save(my_str_path_predict, 'PNG')
    im_pang_label = Image.fromarray(pang_label.astype(dtype=np.uint8))
    im_pang_label.save(my_str_path_label, 'PNG')

    # Save image.
    save_annotation.save_annotation(
        original_image, save_dir, _IMAGE_FORMAT % (image_id_offset + i),
        add_colormap=False)

    # Save image_label. #pang-add
    save_annotation.save_annotation(
        label, save_dir, _IMAGE_FORMAT_LABEL % (image_id_offset + i),
        add_colormap=False)


    # Save prediction.
    save_annotation.save_annotation(
        crop_semantic_prediction, save_dir,
        _PREDICTION_FORMAT % (image_id_offset + i), add_colormap=True,
        colormap_type=FLAGS.colormap_type)

    if FLAGS.also_save_raw_predictions:
      image_filename = os.path.basename(image_names[i])

      if train_id_to_eval_id is not None:
        crop_semantic_prediction = _convert_train_id_to_eval_id(
            crop_semantic_prediction,
            train_id_to_eval_id)
      save_annotation.save_annotation(
          crop_semantic_prediction, raw_save_dir, image_filename,
          add_colormap=False)
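
# Since this variant saves label/prediction pairs, a quick per-image mean IoU
# can be computed on the same arrays; a sketch, where num_classes and the
# ignore-label value of 255 are assumptions:
def _per_image_iou(label, prediction, num_classes, ignore_label=255):
  valid = label != ignore_label
  ious = []
  for c in range(num_classes):
    gt = np.logical_and(label == c, valid)
    pred = np.logical_and(prediction == c, valid)
    union = np.logical_or(gt, pred).sum()
    if union > 0:
      ious.append(np.logical_and(gt, pred).sum() / float(union))
  return np.mean(ious) if ious else 0.0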
def main(unused_argv):
    tf.logging.set_verbosity(tf.logging.INFO)

    tf.gfile.MakeDirs(FLAGS.inference_dir)

    g = tf.Graph()
    with g.as_default():
        image_name = os.path.basename(FLAGS.image_path)
        image_name, image_extension = image_name.rsplit('.', 1)

        supported_extensions = ['png', 'jpeg', 'jpg']

        if not any(image_extension == extension
                   for extension in supported_extensions):
            raise ValueError('Image extension "{}" not supported...'.format(
                image_extension))

        reader = build_data.ImageReader(image_extension)
        image = reader.decode_image(
            tf.gfile.FastGFile(FLAGS.image_path, 'rb').read())
        image = tf.identity(image)
        original_image_dimensions = image.get_shape().as_list()[0:2]
        original_image_dimensions = original_image_dimensions[::-1]  # (h, w) -> (w, h) for cv2.resize

        image = tf.image.resize_images(image, [480, 640],
                                       method=tf.image.ResizeMethod.BILINEAR,
                                       align_corners=True)
        image.set_shape([None, None, 3])
        image = tf.expand_dims(image, 0)

        model_options = common.ModelOptions(
            outputs_to_num_classes={common.OUTPUT_TYPE: FLAGS.num_classes},
            crop_size=FLAGS.inference_crop_size,
            atrous_rates=FLAGS.atrous_rates,
            output_stride=FLAGS.output_stride)

        predictions = model.predict_labels(image,
                                           model_options=model_options,
                                           image_pyramid=None)
        predictions = predictions[common.OUTPUT_TYPE]
        # predictions = tf.image.resize_images(
        #     predictions, original_image_dimensions,
        #     method=tf.image.ResizeMethod.BILINEAR,
        #     align_corners=True)

        param_stats = tf.profiler.profile(
            tf.get_default_graph(),
            options=tf.profiler.ProfileOptionBuilder.
            trainable_variables_parameter())
        print('Total parameters: ', param_stats.total_parameters)

        total_parameters = 0
        for variable in tf.trainable_variables():

            shape = variable.get_shape()
            variable_parameters = 1
            for dim in shape:
                variable_parameters *= dim.value
            total_parameters += variable_parameters
        print('Total parameters: ', total_parameters)

        tf.train.get_or_create_global_step()
        saver = tf.train.Saver(slim.get_variables_to_restore())
        sv = tf.train.Supervisor(graph=g,
                                 logdir=FLAGS.inference_dir,
                                 init_op=tf.global_variables_initializer(),
                                 summary_op=None,
                                 summary_writer=None,
                                 global_step=None,
                                 saver=saver)

        with sv.managed_session(start_standard_services=False) as sess:
            sv.start_queue_runners(sess)
            sv.saver.restore(sess, FLAGS.checkpoint_path)
            semantic_predictions = sess.run(predictions)

        result = np.array(semantic_predictions, dtype=np.uint8)
        result = np.squeeze(result)
        result = cv2.resize(result,
                            tuple(original_image_dimensions),
                            interpolation=cv2.INTER_NEAREST)  # keep label ids intact

        # save raw result...
        save_annotation.save_annotation(result,
                                        FLAGS.inference_dir,
                                        _RAW_FORMAT % image_name,
                                        add_colormap=False)

        # save result as color image...
        save_annotation.save_annotation(result,
                                        FLAGS.inference_dir,
                                        _PREDICTION_FORMAT % image_name,
                                        add_colormap=True,
                                        colormap_type=FLAGS.dataset)