Example #1
    def test(self):
        if self.load_model():
            print(' [*] Load SUCCESS!')
        else:
            print(' [!] Load Failed...')

        # read test data
        test_data_files = utils.all_files_under(self.dataset.night_path)
        total_time = 0.

        for idx in range(len(test_data_files)):
            img = utils.imagefiles2arrs([test_data_files[idx]])  # read img
            img = utils.transform(img)  # convert [0, 255] to [-1., 1.]

            # measure inference time
            start_time = time.time()
            imgs = self.model.test_step(img, mode='YtoX')  # inference
            total_time += time.time() - start_time

            self.model.plots(imgs, idx, self.dataset.image_size,
                             self.test_out_dir)  # write results

        print('Avg PT: {:.3f} msec.'.format(
            total_time / len(test_data_files) * 1000.))
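The timing loop above is a generic pattern; a minimal standalone sketch of the same idea, where infer and samples are hypothetical stand-ins for any model call and test set:

import time

def average_inference_ms(infer, samples):
    """Return the mean per-sample inference time in milliseconds."""
    total = 0.0
    for sample in samples:
        start = time.time()
        infer(sample)  # the call being timed
        total += time.time() - start
    return total / len(samples) * 1000.0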
Example #2
    # load the model and weights
    with open(f_model.format(dataset), 'r') as f:
        model = model_from_json(f.read())
    model.load_weights(f_weights.format(dataset))

    # iterate all images
    img_size = (640, 640) if dataset == "DRIVE" else (720, 720)
    ori_shape = (1, 584, 565) if dataset == "DRIVE" else (1, 605, 700)  # batch size = 1
    fundus_files = utils.all_files_under(fundus_dir.format(dataset))
    mask_files = utils.all_files_under(mask_dir.format(dataset))
    for index, fundus_file in enumerate(fundus_files):
        print("processing {}...".format(fundus_file))
        # load imgs
        img = utils.imagefiles2arrs([fundus_file])
        mask = utils.imagefiles2arrs([mask_files[index]])

        # z score with mean, std (batchsize=1)
        mean = np.mean(img[0, ...][mask[0, ...] == 255.0], axis=0)
        std = np.std(img[0, ...][mask[0, ...] == 255.0], axis=0)
        img[0, ...] = (img[0, ...] - mean) / std

        # run inference
        padded_img = utils.pad_imgs(img, img_size)
        #padded_img=utils.rescale_imgs(img, img_size)
        vessel_img = model.predict(padded_img, batch_size=1) * 255
        # rescaled_vessel = utils.rescale_to_original(vessel_img[...,0], ori_shape)
        # final_result = utils.remain_in_mask(rescaled_vessel[0,...], mask[0,...])
    cropped_vessel = utils.crop_to_original(vessel_img[..., 0], ori_shape)
    final_result = utils.remain_in_mask(cropped_vessel[0, ...], mask[0, ...])
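The masked z-score step in this example is self-contained enough to isolate; a minimal NumPy sketch, where img with shape (h, w, 3) and mask with shape (h, w) are hypothetical stand-ins for one fundus image and its field-of-view mask:

import numpy as np

def zscore_in_mask(img, mask):
    """Standardize each channel using only pixels inside the mask."""
    pixels = img[mask == 255.0]   # (n_pixels, 3), field-of-view pixels only
    mean = pixels.mean(axis=0)    # per-channel mean
    std = pixels.std(axis=0)      # per-channel std
    return (img - mean) / std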
Example #3
    )
FLAGS, _ = parser.parse_known_args()

os.environ['CUDA_VISIBLE_DEVICES'] = FLAGS.gpu_index

# load the models and corresponding weights
with open("../model/vessel/network.json", 'r') as f:
    vessel_model = model_from_json(f.read())
vessel_model.load_weights("../model/vessel/network_weight.h5")

# make directories
if not os.path.isdir(FLAGS.out_dir):
    os.makedirs(FLAGS.out_dir)

# iterate all images
img_size = (640, 640)
filenames = utils.all_files_under(FLAGS.img_dir)
for filename in filenames:
    # load an img (tensor shape of [1,h,w,3])
    img = utils.imagefiles2arrs([filename])
    _, h, w, _ = img.shape
    
    # z score with mean, std (batchsize=1)
    mean = np.mean(img[0, ...][img[0, ..., 0] > 40.0], axis=0)
    std = np.std(img[0, ...][img[0, ..., 0] > 40.0], axis=0)
    img[0, ...] = (img[0, ...] - mean) / std
    assert len(mean) == 3 and len(std) == 3
    
    # run inference & save the result
    vessel = vessel_model.predict(img, batch_size=1)
    Image.fromarray((vessel[0, ..., 0] * 255).astype(np.uint8)).save(
        os.path.join(FLAGS.out_dir, os.path.basename(filename)))
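The saved file holds vessel probabilities scaled to [0, 255]; to get a binary mask one could threshold it afterwards, as in this sketch (the 0.5 cutoff and the file names are assumptions):

import numpy as np
from PIL import Image

prob = np.asarray(Image.open("vessel.png"), dtype=np.float32) / 255.0  # back to [0, 1]
binary = (prob >= 0.5).astype(np.uint8) * 255  # assumed 0.5 threshold
Image.fromarray(binary).save("vessel_binary.png")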
Example #4
import utils
import os
import numpy as np
from sklearn.metrics import confusion_matrix
import argparse
from skimage import measure
from scipy.spatial.distance import dice
from keras.models import model_from_json

os.environ['CUDA_VISIBLE_DEVICES'] = "3"

gt_dir = "../data/original/OD_Segmentation_Training_Set/"
gt_filenames = utils.all_files_under(gt_dir)
gt = utils.imagefiles2arrs(gt_filenames).astype(np.uint8)

# load the models and corresponding weights
with open("../model/vessel/network.json", 'r') as f:
    vessel_model = model_from_json(f.read())
vessel_model.load_weights("../model/vessel/network_weight.h5")
with open("../model/od_from_fundus_vessel/network.json", 'r') as f:
    od_from_fundus_vessel_model = model_from_json(f.read())
od_from_fundus_vessel_model.load_weights(
    "../model/od_from_fundus_vessel/network_weight.h5")

# iterate all images
filenames = utils.all_files_under("../data/original/Training_c/")
pred = np.zeros(gt.shape)
for index, filename in enumerate(filenames):
    assert "IDRiD_" in os.path.basename(filename)
    # load an img (tensor shape of [1,h,w,3])
    img = utils.imagefiles2arrs([filename])
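Note that scipy.spatial.distance.dice, imported above, returns a dissimilarity, so the Dice coefficient of two binary masks is its complement:

import numpy as np
from scipy.spatial.distance import dice

a = np.array([1, 1, 0, 0], dtype=bool)
b = np.array([1, 0, 0, 0], dtype=bool)
dice_coefficient = 1.0 - dice(a, b)  # dice() is a distance, not a similarity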
Example #5
for model_type in ["final_model"]:
    for task in tasks:
        if task == "SE":
            val_indices = [49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61]
        else:
            val_indices = [0, 1, 2, 3, 4, 54, 55, 56, 57, 58, 59, 60, 61]

        gt_dir = gt_dir_template.format(task)
        pred_dir = pred_dir_template.format(model_type, task)
        gt_filenames = utils.all_files_under(gt_dir)
        pred_filenames = utils.all_files_under(pred_dir)
        training_indices = ~np.in1d(range(len(pred_filenames)), val_indices)

        # build gt arrays
        index_gt = 0
        pred_all = utils.imagefiles2arrs(pred_filenames)
        gt_all = np.zeros(pred_all.shape)
        for index_pred in range(len(pred_filenames)):
            # build array of gt: pair each prediction with the gt file whose
            # name contains the prediction's basename
            pred_name = os.path.basename(pred_filenames[index_pred]).replace(".jpg", "")
            if index_gt < len(gt_filenames) and pred_name in os.path.basename(gt_filenames[index_gt]):
                gt = utils.imagefiles2arrs(
                    gt_filenames[index_gt:index_gt + 1]).astype(np.uint8)[0, ...]
                gt_all[index_pred, ...] = gt
                index_gt += 1

        # compute precision-recall metrics
        aupr_all, best_f1_all, best_f1_thresh_all, sen_all, ppv_all = utils.pr_metric(
            gt_all, pred_all)
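utils.pr_metric is not shown in this listing; a plausible scikit-learn sketch with the signature assumed above, pr_metric(gt, pred), returning AUPR, the best F1, its threshold, and the matching recall and precision:

import numpy as np
from sklearn.metrics import precision_recall_curve, auc

def pr_metric(gt, pred):
    """AUPR plus best F1 and its threshold (a sketch, not the original utils code)."""
    precision, recall, thresholds = precision_recall_curve(gt.flatten() > 0, pred.flatten())
    aupr = auc(recall, precision)
    f1 = 2 * precision * recall / (precision + recall + 1e-12)
    best = np.argmax(f1[:-1])  # the last PR point has no threshold
    return aupr, f1[best], thresholds[best], recall[best], precision[best]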
Example #6
import utils
import os
import numpy as np
from sklearn.metrics import confusion_matrix
import argparse
from skimage import measure
from scipy.spatial.distance import dice

gt_dir = "../data/original/OD_Segmentation_Training_Set/"
pred_dir = "../../inference_codes/outputs_sub3/segmentation/"
gt_filenames = utils.all_files_under(gt_dir)
pred_filenames = utils.all_files_under(pred_dir)
list_sen, list_spe, list_jaccard, list_dice, list_dice_ori = [], [], [], [], []
for index in range(len(gt_filenames)):
    # get gt and pred segs
    gt = utils.imagefiles2arrs(gt_filenames[index:index + 1]).astype(
        np.uint8)[0, ...]
    pred = utils.imagefiles2arrs(pred_filenames[index:index + 1]).astype(
        np.uint8)[0, ...] // 255
    assert len(gt.shape) == 2 and len(pred.shape) == 2

    # compute sensitivity and specificity
    print(pred_filenames[index])
    cm, spe, sen, dice_val, jaccard_val = utils.seg_metrics(gt, pred)

    # print results and store to lists
    print "--- {} ---".format(os.path.basename(gt_filenames[index]))
    print "specificity: {}".format(spe)
    print "sensitivity: {}".format(sen)
    print "jaccard: {}".format(jaccard_val)
    print "dice: {}".format(dice_val)
    list_spe.append(spe)
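utils.seg_metrics is external to this listing; a minimal sketch that matches the return order used above, assuming binary 2-D masks:

import numpy as np
from sklearn.metrics import confusion_matrix

def seg_metrics(gt, pred):
    """Confusion matrix, specificity, sensitivity, dice, jaccard (a sketch)."""
    cm = confusion_matrix(gt.flatten(), pred.flatten(), labels=[0, 1])
    tn, fp, fn, tp = cm.ravel()
    spe = tn / (tn + fp)
    sen = tp / (tp + fn)
    dice_val = 2.0 * tp / (2 * tp + fp + fn)
    jaccard_val = tp / (tp + fp + fn)
    return cm, spe, sen, dice_val, jaccard_val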
Example #7
import utils
import argparse

# arrange arguments
parser = argparse.ArgumentParser()
FLAGS, _ = parser.parse_known_args()
gt_dir = "../data/original/OD_Segmentation_Training_Set/"
pred_dir = "../outputs/final_results_loss_weight_1_0.1_non_thre/od_from_fundus_vessel_visualization"

gt_filenames = utils.all_files_under(gt_dir)
pred_filenames = utils.all_files_under(pred_dir)

# build gt arrays
pred_all = utils.imagefiles2arrs(pred_filenames)
pred_all /= 255.
gt_all = utils.imagefiles2arrs(gt_filenames)

dice, threshold = utils.segmentation_optimal_threshold(gt_all, pred_all)

print "dice: {}".format(dice)
print "best threshold: {}".format(threshold)
Example #8
    localization_model = model_from_json(f.read())
localization_model.load_weights(
    "../model/od_fovea_localization/network_weight.h5")

# make directories
img_out_dir = os.path.join(FLAGS.out_dir, "pts_check")
if not os.path.isdir(img_out_dir):
    os.makedirs(img_out_dir)

# iterate all images
filepaths = utils.all_files_under(FLAGS.img_dir)
image_no, od, fovea = [], {"x": [], "y": []}, {"x": [], "y": []}
for filepath in filepaths:
    assert "IDRiD_" in os.path.basename(filepath)
    # load an img (tensor shape of [1,h,w,3])
    img = utils.imagefiles2arrs([filepath])
    _, h, w, _ = img.shape
    assert h == 2848 and w == 4288
    resized_img = utils.resize_img(img)

    # run inference
    vessel = vessel_model.predict(
        utils.normalize(resized_img, "vessel_segmentation"), batch_size=1)
    predicted_coords, _ = localization_model.predict(
        [utils.normalize(resized_img, "od_from_fundus_vessel"),
         (vessel * 255).astype(np.uint8) / 255.],
        batch_size=1)

    # convert to coordinates in the original image
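    # The snippet breaks off here; a plausible completion, assuming the resize
    # target was 640x640 (as in the earlier examples) and that, hypothetically,
    # predicted_coords[0] holds [od_x, od_y, fovea_x, fovea_y] in that frame:
    od_x = predicted_coords[0][0] * w / 640.0
    od_y = predicted_coords[0][1] * h / 640.0
    od["x"].append(od_x)
    od["y"].append(od_y)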