Example #1
def _process_batch(sess, original_images, semantic_predictions, image_names,
                   image_heights, image_widths, image_id_offset, save_dir,
                   raw_save_dir, train_id_to_eval_id=None):
    """Evaluates one single batch qualitatively.

    Args:
      sess: TensorFlow session.
      original_images: One batch of original images.
      semantic_predictions: One batch of semantic segmentation predictions.
      image_names: Image names.
      image_heights: Image heights.
      image_widths: Image widths.
      image_id_offset: Image id offset for indexing images.
      save_dir: The directory where the predictions will be saved.
      raw_save_dir: The directory where the raw predictions will be saved.
      train_id_to_eval_id: A list mapping from train id to eval id.
    """
    (original_images,
     semantic_predictions,
     image_names,
     image_heights,
     image_widths) = sess.run([original_images, semantic_predictions,
                               image_names, image_heights, image_widths])

    num_image = semantic_predictions.shape[0]
    for i in range(num_image):
        image_height = np.squeeze(image_heights[i])
        image_width = np.squeeze(image_widths[i])
        original_image = np.squeeze(original_images[i])
        semantic_prediction = np.squeeze(semantic_predictions[i])
        crop_semantic_prediction = semantic_prediction[:image_height, :image_width]

        # Save image.
        save_annotation.save_annotation(
            original_image, save_dir, _IMAGE_FORMAT % (image_id_offset + i),
            add_colormap=False)

        # Save prediction.
        save_annotation.save_annotation(
            crop_semantic_prediction, save_dir,
            _PREDICTION_FORMAT % (image_id_offset + i), add_colormap=True,
            colormap_type=FLAGS.colormap_type)

        if FLAGS.also_save_raw_predictions:
            image_filename = os.path.basename(image_names[i])

            if train_id_to_eval_id is not None:
                crop_semantic_prediction = _convert_train_id_to_eval_id(
                    crop_semantic_prediction,
                    train_id_to_eval_id)
            save_annotation.save_annotation(
                crop_semantic_prediction, raw_save_dir, image_filename,
                add_colormap=False)
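For context, a minimal sketch of how this helper is typically driven (a hypothetical loop; `samples`, `predictions`, `num_batches`, `common`, and `FLAGS.vis_batch_size` are assumed to come from DeepLab's vis.py setup):

# Hypothetical driver loop around _process_batch.
image_id_offset = 0
for batch in range(num_batches):
    _process_batch(sess=sess,
                   original_images=samples[common.ORIGINAL_IMAGE],
                   semantic_predictions=predictions,
                   image_names=samples[common.IMAGE_NAME],
                   image_heights=samples[common.HEIGHT],
                   image_widths=samples[common.WIDTH],
                   image_id_offset=image_id_offset,
                   save_dir=save_dir,
                   raw_save_dir=raw_save_dir,
                   train_id_to_eval_id=train_id_to_eval_id)
    image_id_offset += FLAGS.vis_batch_size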
Example #2
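    # Scale pixel values from [0, 255] to [-1, 1] before inference.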
    image_center = (2.0 / 255) * image_rgb - 1.0
    image_center = image_center.astype(np.float32)
    images = np.expand_dims(image_center, axis=0)
    images_pth = np.expand_dims(np.transpose(image_center, axes=(2, 0, 1)),
                                axis=0)
    images_pth = torch.from_numpy(images_pth).to(device)

    deeplab = model.deeplab(num_classes=21,
                            pretrained=True,
                            atrous_rates=val_atrous_rates,
                            output_stride=val_output_stride,
                            checkpoint_path=checkpoint_path).to(device)
    deeplab.eval()
    with torch.no_grad():
        logits = deeplab(images_pth)
        logits = torch.nn.functional.softmax(logits, dim=1)
        #logits = model.resize_bilinear(logits, original_size)
        labels = torch.argmax(logits, dim=1)
        label_np = np.squeeze(labels.cpu().numpy()).astype(np.uint8)
        label_np = unpad_and_resize(label_np, original_size)

        output_path = image_path.replace('.jpg', '_seg')
        # Split with os.path rather than str.replace, which could corrupt
        # the directory part if the file name also appears in it.
        file_name = os.path.basename(output_path)
        save_dir = os.path.dirname(output_path)
        save_annotation.save_annotation(label_np,
                                        save_dir,
                                        file_name,
                                        add_colormap=True,
                                        colormap_type=colormap_type)
        print('The result is saved to:', save_dir)
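The `unpad_and_resize` helper used above is not shown in this example; a plausible sketch, assuming the network input was scaled so its longer side matches the crop size and then zero-padded, with nearest-neighbour resizing to keep the label ids intact:

import numpy as np
from PIL import Image

def unpad_and_resize(label, original_size):
    # Hypothetical reconstruction: drop the padded region, then resize the
    # label map back to the original (height, width).
    height, width = original_size
    scale = max(label.shape) / float(max(height, width))
    valid = label[:int(round(height * scale)), :int(round(width * scale))]
    return np.array(Image.fromarray(valid).resize((width, height),
                                                  Image.NEAREST))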
Example #3
def _process_batch(sess,
                   original_images,
                   semantic_predictions,
                   image_names,
                   image_heights,
                   image_widths,
                   image_id_offset,
                   save_dir,
                   raw_save_dir,
                   input_feature,
                   middle_feature,
                   aspp,
                   train_id_to_eval_id=None):
    """Evaluates one single batch qualitatively.

  Args:
    sess: TensorFlow session.
    original_images: One batch of original images.
    semantic_predictions: One batch of semantic segmentation predictions.
    image_names: Image names.
    image_heights: Image heights.
    image_widths: Image widths.
    image_id_offset: Image id offset for indexing images.
    save_dir: The directory where the predictions will be saved.
    raw_save_dir: The directory where the raw predictions will be saved.
    train_id_to_eval_id: A list mapping from train id to eval id.
  """
    (original_images, semantic_predictions, image_names, image_heights,
     image_widths, input_feature, middle_feature, aspp[0], aspp[1], aspp[2],
     aspp[3]) = sess.run([
         original_images, semantic_predictions, image_names, image_heights,
         image_widths, input_feature, middle_feature, aspp[0], aspp[1],
         aspp[2], aspp[3]
     ])

    num_image = semantic_predictions.shape[0]
    for i in range(num_image):
        image_height = np.squeeze(image_heights[i])
        image_width = np.squeeze(image_widths[i])
        original_image = np.squeeze(original_images[i])
        semantic_prediction = np.squeeze(semantic_predictions[i])
        crop_semantic_prediction = semantic_prediction[:image_height, :image_width]
        input_feat = np.squeeze(input_feature[i])
        middle_feat = np.squeeze(middle_feature[i])
        aspp0_feat = np.squeeze(aspp[0][i])
        aspp1_feat = np.squeeze(aspp[1][i])
        aspp2_feat = np.squeeze(aspp[2][i])
        aspp3_feat = np.squeeze(aspp[3][i])
        # Save every channel of each feature map as its own grayscale image.
        # Note: the channel count comes from feat.shape[-1]; indexing with
        # feat[-1] would return the last row, not an integer.
        for suffix, feat in [('input', input_feat), ('middle', middle_feat),
                             ('aspp0', aspp0_feat), ('aspp1', aspp1_feat),
                             ('aspp2', aspp2_feat), ('aspp3', aspp3_feat)]:
            num_channel = feat.shape[-1]
            for j in range(num_channel):
                _save_annotation(feat[:, :, j], save_dir + '_' + suffix,
                                 _FEAT_FORMAT % (image_id_offset + i, j))
        # Save image.
        save_annotation.save_annotation(original_image,
                                        save_dir,
                                        _IMAGE_FORMAT % (image_id_offset + i),
                                        add_colormap=False)

        # Save prediction.
        save_annotation.save_annotation(crop_semantic_prediction,
                                        save_dir,
                                        _PREDICTION_FORMAT %
                                        (image_id_offset + i),
                                        add_colormap=True,
                                        colormap_type=FLAGS.colormap_type)

        if FLAGS.also_save_raw_predictions:
            image_filename = os.path.basename(image_names[i])

            if train_id_to_eval_id is not None:
                crop_semantic_prediction = _convert_train_id_to_eval_id(
                    crop_semantic_prediction, train_id_to_eval_id)
            save_annotation.save_annotation(crop_semantic_prediction,
                                            raw_save_dir,
                                            image_filename,
                                            add_colormap=False)
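The `_save_annotation` wrapper used for the feature maps is not defined in this example; a minimal sketch of what it plausibly does, rescaling one float channel to uint8 and reusing `save_annotation.save_annotation`:

def _save_annotation(feature_map, save_dir, filename):
    # Hypothetical helper: normalize a single-channel float map to [0, 255]
    # so it can be written as a grayscale image.
    feature_map = feature_map - feature_map.min()
    if feature_map.max() > 0:
        feature_map = feature_map / feature_map.max()
    save_annotation.save_annotation((255 * feature_map).astype(np.uint8),
                                    save_dir, filename, add_colormap=False)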
Example #4
def _process_batch(sess,
                   original_images,
                   semantic_predictions,
                   image_names,
                   image_heights,
                   image_widths,
                   image_id_offset,
                   save_dir,
                   raw_save_dir,
                   logits,
                   train_id_to_eval_id=None):
    """Evaluates one single batch qualitatively.

  Args:
    sess: TensorFlow session.
    original_images: One batch of original images.
    semantic_predictions: One batch of semantic segmentation predictions.
    image_names: Image names.
    image_heights: Image heights.
    image_widths: Image widths.
    image_id_offset: Image id offset for indexing images.
    save_dir: The directory where the predictions will be saved.
    raw_save_dir: The directory where the raw predictions will be saved.
    train_id_to_eval_id: A list mapping from train id to eval id.
  """
    (original_images, semantic_predictions, image_names, image_heights,
     image_widths, logits) = sess.run([
         original_images, semantic_predictions, image_names, image_heights,
         image_widths, logits
     ])

    num_image = semantic_predictions.shape[0]

    for i in range(num_image):
        image_height = np.squeeze(image_heights[i])
        image_width = np.squeeze(image_widths[i])
        original_image = np.squeeze(original_images[i])
        semantic_prediction = np.squeeze(semantic_predictions[i])
        crop_semantic_prediction = semantic_prediction[:image_height, :image_width]

        # Save image.
        save_annotation.save_annotation(original_image,
                                        save_dir,
                                        "%s_image" % (image_names[i]),
                                        add_colormap=False)

        # Uncomment to save logits along with the visualization examples.
        # path = FLAGS.logits_file + "/" + str(image_names[i])
        # np.save(path, logits)
        # print("logits saved")

        # Optionally copy the ground truth into the vis/segmentation_results
        # folder (the path below is specific to the original author's setup):
        # gt_dir = "/home/pf/pfshare/data/MA_Rajanie/models/research/deeplab/datasets/lake/SegmentationClassPNG"
        # gt_file = str(image_names[i]).replace("b", "").replace("'", "") + ".png"
        # shutil.copy(os.path.join(gt_dir, gt_file), save_dir)

        # Save prediction.
        save_annotation.save_annotation(crop_semantic_prediction,
                                        save_dir,
                                        "%s_pred" % (image_names[i]),
                                        add_colormap=True,
                                        colormap_type=FLAGS.colormap_type)

        if FLAGS.also_save_raw_predictions:
            image_filename = os.path.basename(image_names[i])

            if train_id_to_eval_id is not None:
                crop_semantic_prediction = _convert_train_id_to_eval_id(
                    crop_semantic_prediction, train_id_to_eval_id)
            save_annotation.save_annotation(crop_semantic_prediction,
                                            raw_save_dir,
                                            image_filename,
                                            add_colormap=False)
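All four examples call `_convert_train_id_to_eval_id` before saving raw predictions; a sketch of the usual DeepLab implementation (e.g. mapping Cityscapes train ids back to the official eval ids):

def _convert_train_id_to_eval_id(prediction, train_id_to_eval_id):
    # Replace each train id in the prediction with its corresponding eval id.
    converted_prediction = prediction.copy()
    for train_id, eval_id in enumerate(train_id_to_eval_id):
        converted_prediction[prediction == train_id] = eval_id
    return converted_prediction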