Example #1

import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
import models as M  # assumption: the BCDU-Net repo's models module

policy = tf.keras.mixed_precision.experimental.Policy('mixed_float16')
tf.keras.mixed_precision.experimental.set_policy(policy)
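# Note: the experimental mixed-precision API above was removed in newer
# TensorFlow releases; on TF >= 2.4 the equivalent call (an alternative,
# not the original author's) is:
#   tf.keras.mixed_precision.set_global_policy('mixed_float16')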

####################################  Load Data #####################################
folder = '../BCDU-Net/Lung Segmentation/processed_data/'
te_data = np.load(folder + 'data_test.npy')
FOV = np.load(folder + 'FOV_te.npy')
te_mask = np.load(folder + 'mask_test.npy')

te_data = np.expand_dims(te_data, axis=3)

print('Dataset loaded')
#te_data2  = dataset_normalized(te_data)
te_data2 = te_data / 255.
model = M.BCDU_net_D3(input_size=(512, 512, 1))
model.summary()

model.load_weights('../BCDU-Net/Lung Segmentation/weight_lung')
predictions = model.predict(te_data2, batch_size=1, verbose=1)

# Post-processing
predictions = np.squeeze(predictions)
predictions = np.where(predictions > 0.5, 1, 0)
plt.imshow(np.uint8(np.squeeze(predictions[0])))
plt.savefig("test.png")
Estimated_lung = np.where((FOV - predictions) > 0.5, 1, 0)

print(Estimated_lung)
print(Estimated_lung.shape)
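
te_mask is loaded above but never used. A minimal scoring sketch (an assumption, not part of the original script): compare the estimated lung mask against it with a Dice coefficient.

te_mask_bin = np.where(np.squeeze(te_mask) > 0.5, 1, 0)
intersection = np.sum(Estimated_lung * te_mask_bin)
dice = 2.0 * intersection / (np.sum(Estimated_lung) + np.sum(te_mask_bin) + 1e-7)
print('Dice coefficient:', dice)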
Example #2

import numpy as np
import models as M  # assumption: the BCDU-Net repo's models module
from tensorflow.keras.callbacks import ModelCheckpoint, ReduceLROnPlateau

name_experiment = 'test'
#training settings

batch_size = 8

####################################  Load Data #####################################
patches_imgs_train = np.load('patches_imgs_train.npy')
patches_masks_train = np.load('patches_masks_train.npy')

patches_imgs_train = np.einsum('klij->kijl', patches_imgs_train)
patches_masks_train = np.einsum('klij->kijl', patches_masks_train)

print('Patch extracted')

#model = M.unet2_segment(input_size = (64,64,1))
model = M.BCDU_net_D3(input_size=(64, 64, 1))
model.summary()

print('Training')

nb_epoch = 50

mcp_save = ModelCheckpoint('weight_lstm.hdf5',
                           save_best_only=True,
                           monitor='val_loss',
                           mode='min')
reduce_lr_loss = ReduceLROnPlateau(monitor='val_loss',
                                   factor=0.1,
                                   patience=7,
                                   verbose=1,
                                   min_delta=1e-4,  # 'epsilon' was renamed 'min_delta' in Keras 2.x
                                   mode='min')
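
The snippet breaks off here. A minimal sketch of how these callbacks would typically be wired into training (assumptions: binary cross-entropy, the Adam optimizer, and a 20% validation split; none of this is confirmed by the source):

model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])
model.fit(patches_imgs_train, patches_masks_train,
          batch_size=batch_size,
          epochs=nb_epoch,
          validation_split=0.2,
          callbacks=[mcp_save, reduce_lr_loss])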
Example #3

import os
import numpy as np
import models as M  # assumption: the BCDU-Net repo's models module
from sklearn.metrics import roc_auc_score
from sklearn.metrics import confusion_matrix
from sklearn.metrics import precision_recall_curve
from sklearn.metrics import jaccard_score
from sklearn.metrics import f1_score
from tensorflow import keras
import skimage
from skimage.io import imread, imsave
from skimage.transform import resize
from skimage.color import gray2rgb

os.environ["CUDA_VISIBLE_DEVICES"] = "0"

dataset_add = "/home/users/DATASETS/MapBiomas_SAR/"

model = M.BCDU_net_D3(input_size = (32,32,10))
model.summary()
model.load_weights('weights')

dataset_mean_ind = (-15.34541179, -8.87553847)
dataset_std_ind = (1.53520544, 1.45585154)

file = open("data_split/test.txt", "r")
te_list = [line.rstrip('\n') for line in file]
file.close()

for idx in range(len(te_list)):

    print(idx)
    path = te_list[idx]
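
    # The loop body is cut off in the source. A minimal sketch of a plausible
    # continuation (assumptions: each entry in test.txt names a saved
    # 32x32x10 array under dataset_add, and the two mean/std pairs above
    # standardize the two SAR polarization bands, here taken as interleaved):
    img = np.load(dataset_add + path + '.npy')  # hypothetical file layout
    img[..., 0::2] = (img[..., 0::2] - dataset_mean_ind[0]) / dataset_std_ind[0]
    img[..., 1::2] = (img[..., 1::2] - dataset_mean_ind[1]) / dataset_std_ind[1]
    pred = model.predict(np.expand_dims(img, axis=0), verbose=0)
    seg = np.where(pred[0] >= 0.5, 1, 0)  # threshold as in the other examples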
   
Example #4

import os
import numpy as np
import matplotlib.pyplot as plt
from PIL import Image
from sklearn.metrics import (roc_curve, roc_auc_score, confusion_matrix,
                             precision_recall_curve, jaccard_score, f1_score)
import models as M  # assumption: the BCDU-Net repo's models module
# write_hdf5, load_hdf5, get_data_testing(_overlap), recompone(_overlap),
# kill_border, my_PreProc and pred_only_FOV are assumed to come from the
# repo's helper modules (retina-unet style help_functions / extract_patches).
def testing(channels, height, width, img_dir, borderMasks_dir, path_data="./dataset_training/"):

    if not os.path.exists(path_data):
        os.makedirs(path_data)
 
    #with path, subdirs, file in os.walk(self.img_dir):
    img = np.asarray(Image.open(img_dir))
    border_mask = np.asarray(Image.open(borderMasks_dir))
    #border_mask = np.reshape(border_mask,(1,height,width))

    assert(np.max(border_mask) == 255)
    assert(np.min(border_mask) == 0)

    """
    img = np.transpose(img,(0,3,1,2))
    assert(img.shape == (channels,height,width))
    border_mask = np.reshape(border_masks,(1,height,width))
    assert(border_mask.shape == (1,height,width))
    """

    print ("saving train datasets")
    write_hdf5(img, path_data + "img.hdf5")
    write_hdf5(border_mask, path_data + "borderMask.hdf5")
    

    #original test images (for FOV selection)
    DRIVE_test_imgs_original = path_data + 'img.hdf5'
    test_imgs_orig = load_hdf5(DRIVE_test_imgs_original)
    full_img_height = test_imgs_orig.shape[2]
    full_img_width = test_imgs_orig.shape[3]

    #the border masks provided by the DRIVE
    DRIVE_test_border_masks = path_data + 'borderMask.hdf5'
    test_border_masks = load_hdf5(DRIVE_test_border_masks)
    # dimension of the patches
    patch_height = 64
    patch_width  = 64
    #the stride in case output with average
    stride_height = 5
    stride_width  = 5
    assert (stride_height < patch_height and stride_width < patch_width)
    #model name
    name_experiment = 'test'
    path_experiment = './' + name_experiment +'/'
    #N full images to be predicted
    Imgs_to_test = 20  # full DRIVE test set; used by the data loaders below
    #Grouping of the predicted images
    N_visual = 1
    #====== average mode ===========
    average_mode = True

    #============ Load the data and divide in patches
    patches_imgs_test = None
    new_height = None
    new_width = None
    masks_test  = None
    patches_masks_test = None
    if average_mode == True:
        patches_imgs_test, new_height, new_width, masks_test = get_data_testing_overlap(
            DRIVE_test_imgs_original = DRIVE_test_imgs_original,  #original
            DRIVE_test_groudTruth = path_data + 'DRIVE_dataset_groundTruth_test.hdf5',  #masks
            Imgs_to_test = Imgs_to_test,
            patch_height = patch_height,
            patch_width = patch_width,
            stride_height = stride_height,
            stride_width = stride_width
        )
    else:
        patches_imgs_test, patches_masks_test = get_data_testing(
            DRIVE_test_imgs_original = DRIVE_test_imgs_original,  #original
            DRIVE_test_groudTruth = path_data + 'DRIVE_dataset_groundTruth_test.hdf5',  #masks
            Imgs_to_test = Imgs_to_test,
            patch_height = patch_height,
            patch_width = patch_width,
        )



    #================ Run the prediction of the patches ==================================
    best_last = 'best'
    patches_imgs_test = np.einsum('klij->kijl', patches_imgs_test)

    model = M.BCDU_net_D3(input_size = (64,64,1))
    model.summary()
    model.load_weights('weight_lstm.hdf5')
    predictions = model.predict(patches_imgs_test, batch_size=16, verbose=1)

    predictions = np.einsum('kijl->klij', predictions)
    print(patches_imgs_test.shape)

    pred_patches = predictions

    print ("predicted images size :")
    print (predictions.shape)

    #===== Convert the prediction arrays in corresponding images

    #========== Elaborate and visualize the predicted images ====================
    pred_imgs = None
    orig_imgs = None
    gtruth_masks = None
    if average_mode == True:
        pred_imgs = recompone_overlap(pred_patches, new_height, new_width, stride_height, stride_width)# predictions
        orig_imgs = my_PreProc(test_imgs_orig[0:pred_imgs.shape[0],:,:,:])    #originals
        gtruth_masks = masks_test  #ground truth masks
    else:
        pred_imgs = recompone(pred_patches,13,12)       # predictions
        orig_imgs = recompone(patches_imgs_test,13,12)  # originals
        gtruth_masks = recompone(patches_masks_test,13,12)  #masks
    # apply the DRIVE masks on the predictions #set everything outside the FOV to zero!!
    print('killing border')
    kill_border(pred_imgs, test_border_masks)  #DRIVE MASK  #only for visualization
    ## back to original dimensions
    orig_imgs = orig_imgs[:,:,0:full_img_height,0:full_img_width]
    pred_imgs = pred_imgs[:,:,0:full_img_height,0:full_img_width]
    gtruth_masks = gtruth_masks[:,:,0:full_img_height,0:full_img_width]
    np.save('pred_imgs',pred_imgs)
    print ("Orig imgs shape: " +str(orig_imgs.shape))
    print ("pred imgs shape: " +str(pred_imgs.shape))
    print ("Gtruth imgs shape: " +str(gtruth_masks.shape))

    np.save('results', pred_imgs)
    np.save('origin', gtruth_masks)
    assert (orig_imgs.shape[0]==pred_imgs.shape[0] and orig_imgs.shape[0]==gtruth_masks.shape[0])
    N_predicted = orig_imgs.shape[0]
    group = N_visual
    assert (N_predicted%group==0)



    #====== Evaluate the results
    print ("\n\n========  Evaluate the results =======================")
    #predictions only inside the FOV
    y_scores, y_true = pred_only_FOV(pred_imgs,gtruth_masks, test_border_masks)  #returns data only inside the FOV
    print(y_scores.shape)

    print ("Calculating results only inside the FOV:")
    print ("y scores pixels: " +str(y_scores.shape[0]) +" (radius 270: 270*270*3.14==228906), including background around retina: " +str(pred_imgs.shape[0]*pred_imgs.shape[2]*pred_imgs.shape[3]) +" (584*565==329960)")
    print ("y true pixels: " +str(y_true.shape[0]) +" (radius 270: 270*270*3.14==228906), including background around retina: " +str(gtruth_masks.shape[2]*gtruth_masks.shape[3]*gtruth_masks.shape[0])+" (584*565==329960)")

    #Area under the ROC curve
    fpr, tpr, thresholds = roc_curve(y_true, y_scores)
    AUC_ROC = roc_auc_score(y_true, y_scores)
    # test_integral = np.trapz(tpr,fpr) #trapz is numpy integration
    print ("\nArea under the ROC curve: " +str(AUC_ROC))
    roc_curve_fig = plt.figure()  # renamed: assigning to 'roc_curve' would shadow the sklearn function and make the call above raise UnboundLocalError
    plt.plot(fpr,tpr,'-',label='Area Under the Curve (AUC = %0.4f)' % AUC_ROC)
    plt.title('ROC curve')
    plt.xlabel("FPR (False Positive Rate)")
    plt.ylabel("TPR (True Positive Rate)")
    plt.legend(loc="lower right")
    plt.savefig(path_experiment+"ROC.png")

    #Precision-recall curve
    precision, recall, thresholds = precision_recall_curve(y_true, y_scores)
    precision = np.flip(precision)  #so the array is increasing (you won't get negative AUC)
    recall = np.flip(recall)  #so the array is increasing (you won't get negative AUC)
    AUC_prec_rec = np.trapz(precision,recall)
    print ("\nArea under Precision-Recall curve: " +str(AUC_prec_rec))
    prec_rec_curve = plt.figure()
    plt.plot(recall,precision,'-',label='Area Under the Curve (AUC = %0.4f)' % AUC_prec_rec)
    plt.title('Precision - Recall curve')
    plt.xlabel("Recall")
    plt.ylabel("Precision")
    plt.legend(loc="lower right")
    plt.savefig(path_experiment+"Precision_recall.png")

    #Confusion matrix
    threshold_confusion = 0.5
    print ("\nConfusion matrix:  Custom threshold (for positive) of " +str(threshold_confusion))
    y_pred = np.where(y_scores >= threshold_confusion, 1, 0)
    confusion = confusion_matrix(y_true, y_pred)
    print (confusion)
    accuracy = 0
    if float(np.sum(confusion))!=0:
        accuracy = float(confusion[0,0]+confusion[1,1])/float(np.sum(confusion))
    print ("Global Accuracy: " +str(accuracy))
    specificity = 0
    if float(confusion[0,0]+confusion[0,1])!=0:
        specificity = float(confusion[0,0])/float(confusion[0,0]+confusion[0,1])
    print ("Specificity: " +str(specificity))
    sensitivity = 0
    if float(confusion[1,1]+confusion[1,0])!=0:
        sensitivity = float(confusion[1,1])/float(confusion[1,1]+confusion[1,0])
    print ("Sensitivity: " +str(sensitivity))
    precision = 0
    if float(confusion[1,1]+confusion[0,1])!=0:
        precision = float(confusion[1,1])/float(confusion[1,1]+confusion[0,1])
    print ("Precision: " +str(precision))

    #Jaccard similarity index
    jaccard_index = jaccard_score(y_true, y_pred)  # jaccard_similarity_score was removed from scikit-learn
    print ("\nJaccard similarity score: " +str(jaccard_index))

    #F1 score
    F1_score = f1_score(y_true, y_pred, labels=None, average='binary', sample_weight=None)
    print ("\nF1 score (F-measure): " +str(F1_score))

    #Save the results
    file_perf = open(path_experiment+'performances.txt', 'w')
    file_perf.write("Area under the ROC curve: "+str(AUC_ROC)
                    + "\nArea under Precision-Recall curve: " +str(AUC_prec_rec)
                    + "\nJaccard similarity score: " +str(jaccard_index)
                    + "\nF1 score (F-measure): " +str(F1_score)
                    +"\n\nConfusion matrix:"
                    +str(confusion)
                    +"\nACCURACY: " +str(accuracy)
                    +"\nSENSITIVITY: " +str(sensitivity)
                    +"\nSPECIFICITY: " +str(specificity)
                    +"\nPRECISION: " +str(precision)
                    )
    file_perf.close()

    # Visualize
    fig,ax = plt.subplots(10,3,figsize=[15,15])

    for idx in range(10):
        ax[idx, 0].imshow(np.uint8(np.squeeze((orig_imgs[idx]))))
        ax[idx, 1].imshow(np.squeeze(gtruth_masks[idx]), cmap='gray')
        ax[idx, 2].imshow(np.squeeze(pred_imgs[idx]), cmap='gray')

    plt.savefig(path_experiment+'sample_results.png')
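
A minimal usage sketch for the function above (the paths are hypothetical; 584x565 matches the DRIVE dimensions quoted in the prints):

testing(channels=1, height=584, width=565,
        img_dir='./DRIVE/test/images/01_test.tif',
        borderMasks_dir='./DRIVE/test/mask/01_test_mask.gif')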
Example #5

import numpy as np
import models as M  # assumption: the BCDU-Net repo's models module
import utils as UT  # assumption: repo module with get_test_data / extract_ordered_overlap / recompone_overlap
patch_h     = 128
patch_w     = 128
stride_h    = 5
stride_w    = 5
Batch_size  = 8


Estimated_masks = []
True_masks      = []

## Get the dataset for test
Dataset_add = './DIBCO/' 
Te_d, Te_m = UT.get_test_data(Dataset_add, 2016)

## Model
model = M.BCDU_net_D3(input_size = (128, 128,1))
model.load_weights('weight_text.hdf5')

y_scores = []
y_true   = []

## Mask estimation using batches
for idx in range(len(Te_d)):
    IMG = Te_d[idx]
    MSK = Te_m[idx]
    patches , new_h, new_w = UT.extract_ordered_overlap(IMG, patch_h, patch_w, stride_h, stride_w)
    patches     = np.expand_dims(patches,  axis = 3)
    predictions = model.predict(patches, batch_size= Batch_size, verbose=1)
    estimated   = UT.recompone_overlap(predictions[:,:,:,0], new_h, new_w, stride_h, stride_w)
    estimated   = np.where(estimated >= 0.5, 1, 0)
    MSK         = MSK[0: new_h, 0:new_w]
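
    # The loop is truncated in the source. A minimal sketch (an assumption,
    # not the author's code) of the accumulation the pre-allocated lists suggest:
    Estimated_masks.append(estimated)
    True_masks.append(MSK)
    y_scores.extend(estimated.flatten())
    y_true.extend((MSK >= 0.5).astype(np.uint8).flatten())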
Example #6

import numpy as np
import models as M  # assumption: the BCDU-Net repo's models module


# The opening of this function is missing in the source; the header and loop
# line below are reconstructed (an assumption) from the surviving body, which
# min-max normalizes each image to the [0, 255] range.
def dataset_normalized(imgs):
    imgs_normalized = np.copy(imgs).astype(np.float32)
    for i in range(imgs.shape[0]):
        imgs_normalized[i] = (
            (imgs_normalized[i] - np.min(imgs_normalized[i])) /
            (np.max(imgs_normalized[i]) - np.min(imgs_normalized[i]))) * 255
    return imgs_normalized


####################################  Load Data #####################################
te_data = np.load('data_test.npy')
te_mask = np.load('mask_test.npy')
te_mask = np.expand_dims(te_mask, axis=3)

print('ISIC18 Dataset loaded')

te_data2 = dataset_normalized(te_data)

model = M.BCDU_net_D3(input_size=(256, 256, 3))
model.summary()
model.load_weights('weight_isic18')
predictions = model.predict(te_data2, batch_size=8, verbose=1)  # use the normalized data; te_data2 was otherwise unused

y_scores = predictions.reshape(
    predictions.shape[0] * predictions.shape[1] * predictions.shape[2] *
    predictions.shape[3], 1)
print(y_scores.shape)

y_true = te_mask.reshape(
    te_mask.shape[0] * te_mask.shape[1] * te_mask.shape[2] * te_mask.shape[3],
    1)

y_scores = np.where(y_scores > 0.5, 1, 0)
y_true = np.where(y_true > 0.5, 1, 0)
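
With both arrays binarized, a short scoring sketch (using sklearn, consistent with the metrics in Example #4; ravel() collapses the (N, 1) columns so the metric functions see 1-D labels):

from sklearn.metrics import accuracy_score, f1_score, jaccard_score

yt, ys = y_true.ravel(), y_scores.ravel()
print('Accuracy:', accuracy_score(yt, ys))
print('F1 score:', f1_score(yt, ys))
print('Jaccard :', jaccard_score(yt, ys))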