def get_data_predict_overlap(imgPredict, patch_height, patch_width, stride_height, stride_width,num_lesion,total_data):
    """Prepare ordered, overlapping patches from a single image for prediction.

    The image is wrapped into a 1-image batch, converted to channels-first,
    preprocessed, padded so it divides exactly into the stride grid, and
    sliced into ordered overlapping patches.

    Returns:
        (patches, padded_height, padded_width, preprocessed_imgs) where
        `preprocessed_imgs` is the channels-first batch *before* padding.
    """
    # Single image -> batch of one, then (N, H, W, C) -> (N, C, H, W).
    batch = np.transpose(np.asarray([imgPredict]), (0, 3, 1, 2))
    adjusted = my_PreProc(batch)

    # Extend the image so it can be divided exactly by the patch strides.
    padded = paint_border_overlap(adjusted, patch_height, patch_width, stride_height, stride_width)
    print("\ntest images shape:")
    print(padded.shape)

    # Extract the ordered, overlapping test patches from the full image.
    patches = extract_ordered_overlap(padded, patch_height, patch_width, stride_height, stride_width)
    print("\ntest PATCHES images shape:")
    print(patches.shape)
    print("test PATCHES images range (min-max): " +str(np.min(patches)) +' - '+str(np.max(patches)))

    return patches, padded.shape[2], padded.shape[3], adjusted
def get_data_testing_overlap(DRIVE_test_imgs_original, DRIVE_test_groudTruth, patch_height, patch_width, stride_height, stride_width,num_lesion,total_data):
    """Load test images/masks, pad them to the patch-stride grid, and
    extract ordered overlapping test patches.

    Returns:
        (patches, padded_height, padded_width, padded_masks).
    """
    ### test
    test_imgs_original = load_hdf5(DRIVE_test_imgs_original)
    # FIX: the original allocated np.zeros([total_data, num_lesion, H, W])
    # and then immediately rebound test_masks to the loaded array, making
    # the allocation dead code. Load the masks directly instead.
    test_masks = load_hdf5(DRIVE_test_groudTruth)  # masks always the same

    test_imgs = my_PreProc(test_imgs_original)
    print (np.max(test_masks),np.min(test_masks))
    test_masks = test_masks/255.

    #extend both images and masks so they can be divided exactly by the patches dimensions
    test_imgs = paint_border_overlap(test_imgs, patch_height, patch_width, stride_height, stride_width)
    test_masks = paint_border_overlap(test_masks, patch_height, patch_width, stride_height, stride_width)
    #check masks are within 0-1
    assert(np.max(test_masks)==1 and np.min(test_masks)==0)

    print ("\ntest images shape:")
    print (test_imgs.shape)
    print ("\ntest mask shape:")
    print (test_masks.shape)
    print ("test images range (min-max): " +str(np.min(test_imgs)) +' - '+str(np.max(test_imgs)))
    print ("test masks are within 0-1\n")

    #extract the TEST patches from the full images
    patches_imgs_test = extract_ordered_overlap(test_imgs, patch_height, patch_width, stride_height, stride_width)
    print("\ntest PATCHES images shape:")
    print(patches_imgs_test.shape)
    print ("test PATCHES images range (min-max): " +str(np.min(patches_imgs_test)) +' - '+str(np.max(patches_imgs_test)))

    return patches_imgs_test, test_imgs.shape[2], test_imgs.shape[3], test_masks
def get_data_training(DRIVE_train_imgs_original, DRIVE_train_groudTruth, patch_height, patch_width, N_subimgs, inside_FOV, num_lesion, total_data ):
    """Load the training images and per-lesion ground-truth masks, preprocess
    and crop them, then sample random training patches.

    Masks are read from ``DRIVE_train_groudTruth + '<i>.hdf5'`` for each
    lesion channel i in 1..num_lesion and stacked along the channel axis.

    Returns:
        (patches_imgs_train, patches_masks_train).
    """
    imgs_orig = load_hdf5(DRIVE_train_imgs_original)#[img_id:img_id+1]
    full_h, full_w = imgs_orig.shape[2], imgs_orig.shape[3]
    masks = np.zeros([total_data, num_lesion, full_h, full_w])
    # One ground-truth file per lesion type; fill one channel per file.
    for lesion_idx in range(num_lesion):
        gt = load_hdf5(DRIVE_train_groudTruth + str(lesion_idx + 1) + '.hdf5')#[img_id:img_id+1]#masks always the same
        masks[:, lesion_idx, :, :] = gt[:, 0, :, :]
        print("mask:", gt.shape)
    print(masks[:, 0, :, :].shape)
    print(imgs_orig[:, 0, :, :].shape)

    imgs = my_PreProc(imgs_orig)
    print(imgs[:, 0, :, :].shape)
    masks = masks / 255.

    imgs = imgs[:, :, 7:429, :]    #cut bottom and top so now it is 422*422
    masks = masks[:, :, 7:429, :]  #cut bottom and top so now it is 422*422
    data_consistency_check(imgs, masks)

    #check masks are within 0-1
    assert(np.min(masks) == 0 and np.max(masks) == 1)

    print ("\ntrain images shape:")
    print (imgs.shape)
    print ("\ntrain masks shape:")
    print (masks.shape)
    print ("train images 0 range (min-max): " +str(np.min(imgs[:,0])) +' - '+str(np.max(imgs[:,0])))
    print ("train images 1 range (min-max): " +str(np.min(imgs[:,1])) +' - '+str(np.max(imgs[:,1])))
    print ("train images 2 range (min-max): " +str(np.min(imgs[:,2])) +' - '+str(np.max(imgs[:,2])))
    print ("train masks are within 0-1\n")

    #extract the TRAINING patches from the full images
    patches_imgs_train, patches_masks_train = extract_random(imgs, masks, patch_height, patch_width, N_subimgs, inside_FOV)
    data_consistency_check(patches_imgs_train, patches_masks_train)

    print ("\ntrain PATCHES images/masks shape:")
    print (patches_imgs_train.shape)
    print (patches_masks_train.shape)
    print ("train PATCHES images range (min-max): " +str(np.min(patches_imgs_train)) +' - '+str(np.max(patches_imgs_train)))

    return patches_imgs_train, patches_masks_train#, patches_imgs_test, patches_masks_test
def get_data_testing(DRIVE_test_imgs_original, DRIVE_test_groudTruth, Imgs_to_test, patch_height, patch_width,num_lesion,total_data):
    """Load the test set with per-lesion masks and cut both into ordered,
    non-overlapping patches.

    Returns:
        (patches_imgs_test, patches_masks_test).
    """
    ### test
    imgs_orig = load_hdf5(DRIVE_test_imgs_original)
    masks = np.zeros([total_data, num_lesion, imgs_orig.shape[2], imgs_orig.shape[3]])
    # One ground-truth file per lesion type; fill one channel per file.
    for lesion_idx in range(num_lesion):
        gt = load_hdf5(DRIVE_test_groudTruth + str(lesion_idx + 1) + '.hdf5')#[img_id:img_id+1]#masks always the same
        masks[:, lesion_idx, :, :] = gt[:, 0, :, :]

    imgs = my_PreProc(imgs_orig)
    masks = masks / 255.

    #extend both images and masks so they can be divided exactly by the patches dimensions
    imgs = imgs[0:Imgs_to_test, :, :, :]
    masks = masks[0:Imgs_to_test, :, :, :]
    imgs = paint_border(imgs, patch_height, patch_width)
    masks = paint_border(masks, patch_height, patch_width)
    data_consistency_check(imgs, masks)

    #check masks are within 0-1
    assert(np.max(masks) == 1 and np.min(masks) == 0)

    print ("\ntest images/masks shape:")
    print (imgs.shape)
    print ("test images range (min-max): " +str(np.min(imgs)) +' - '+str(np.max(imgs)))
    print ("test masks are within 0-1\n")

    #extract the TEST patches from the full images
    patches_imgs_test = extract_ordered(imgs, patch_height, patch_width)
    patches_masks_test = extract_ordered(masks, patch_height, patch_width)
    data_consistency_check(patches_imgs_test, patches_masks_test)

    print ("\ntest PATCHES images/masks shape:")
    print (patches_imgs_test.shape)
    print ("test PATCHES images range (min-max): " +str(np.min(patches_imgs_test)) +' - '+str(np.max(patches_imgs_test)))

    return patches_imgs_test, patches_masks_test
# NOTE(review): interior fragment of a prediction/visualization routine —
# the loop that fills `preds`, and the definitions of `average_mode`,
# `patches_imgs_test`, `masks_test`, `test_border_masks`, etc., lie outside
# this view. Line breaks reconstructed; code tokens unchanged.
preds.append(outputs)
# Stack the per-batch network outputs into a single prediction array.
predictions = np.concatenate(preds, axis=0)
print("Predictions finished")
#===== Convert the prediction arrays in corresponding images
pred_patches = pred_to_imgs(predictions, patch_height, patch_width, "original")
#========== Elaborate and visualize the predicted images ====================
pred_imgs = None
orig_imgs = None
gtruth_masks = None
if average_mode == True:
    # Overlapping-patch mode: average overlapping predictions back into
    # full-size images.
    pred_imgs = recompone_overlap(pred_patches, new_height, new_width, stride_height, stride_width)  # predictions
    orig_imgs = my_PreProc(test_imgs_orig[0:pred_imgs.shape[0], :, :, :])  #originals
    gtruth_masks = masks_test  #ground truth masks
else:
    # Non-overlapping mode: tile the patches back into a 13x12 grid.
    # NOTE(review): 13/12 look like a hard-coded grid for one dataset size —
    # confirm against the patch-extraction configuration.
    pred_imgs = recompone(pred_patches, 13, 12)  # predictions
    orig_imgs = recompone(patches_imgs_test, 13, 12)  # originals
    gtruth_masks = recompone(patches_masks_test, 13, 12)  #masks
# apply the DRIVE masks on the predictions
#set everything outside the FOV to zero!!
kill_border(pred_imgs, test_border_masks)  #DRIVE MASK  #only for visualization
## back to original dimensions
orig_imgs = orig_imgs[:, :, 0:full_img_height, 0:full_img_width]
pred_imgs = pred_imgs[:, :, 0:full_img_height, 0:full_img_width]
gtruth_masks = gtruth_masks[:, :, 0:full_img_height, 0:full_img_width]
print("Orig imgs shape: " + str(orig_imgs.shape))
print("pred imgs shape: " + str(pred_imgs.shape))