Example #1
import numpy as np

# Helper functions (load_hdf5, my_PreProc, paint_border_overlap,
# extract_ordered_overlap) are assumed to come from the project's own
# preprocessing and patch-extraction modules.
def get_data_testing_overlap(test_imgs_original, test_groudTruth, Imgs_to_test, patch_height, patch_width,
                             stride_height, stride_width):
    ### test
    test_imgs_original = load_hdf5(test_imgs_original)
    test_masks = load_hdf5(test_groudTruth)

    test_imgs = my_PreProc(test_imgs_original)
    test_masks = test_masks / 255.
    # keep only the first Imgs_to_test images/masks
    test_imgs = test_imgs[0:Imgs_to_test, :, :, :]
    test_masks = test_masks[0:Imgs_to_test, :, :, :]
    # extend the images so their dimensions are exactly divisible by the patch stride
    test_imgs = paint_border_overlap(test_imgs, patch_height, patch_width, stride_height, stride_width)

    # check masks are within 0-1
    assert (np.max(test_masks) == 1 and np.min(test_masks) == 0)

    print("\ntest images shape:")
    print(test_imgs.shape)
    print("\ntest mask shape:")
    print(test_masks.shape)
    print("test images range (min-max): " + str(np.min(test_imgs)) + ' - ' + str(np.max(test_imgs)))
    print("test masks are within 0-1\n")

    # extract the TEST patches from the full images
    patches_imgs_test = extract_ordered_overlap(test_imgs, patch_height, patch_width, stride_height, stride_width)

    print("\ntest PATCHES images shape:")
    print(patches_imgs_test.shape)
    print("test PATCHES images range (min-max): " + str(np.min(patches_imgs_test)) + ' - ' + str(
        np.max(patches_imgs_test)))
    patches_imgs_test = np.transpose(patches_imgs_test, (0, 2, 3, 1))
    test_masks = np.transpose(test_masks, (0, 2, 3, 1))
    return patches_imgs_test, test_imgs.shape[2], test_imgs.shape[3], test_masks
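A hedged usage sketch follows: the file paths, image count, patch size, and stride are placeholder assumptions rather than values taken from this listing. It shows how the returned patches and padded dimensions are typically consumed.

# Sketch only: paths and sizes are assumed placeholder values.
patches_imgs_test, new_height, new_width, test_masks = get_data_testing_overlap(
    test_imgs_original='./data/test_imgs.hdf5',          # assumed path
    test_groudTruth='./data/test_groundTruth.hdf5',      # assumed path
    Imgs_to_test=20,
    patch_height=48, patch_width=48,
    stride_height=5, stride_width=5)

# The patches come back channels-last, so a Keras-style model could consume them directly:
# predictions = model.predict(patches_imgs_test, batch_size=32)
# new_height / new_width are the padded full-image dimensions needed to
# stitch the overlapping patch predictions back into whole images.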
Example #2
import numpy as np

# Helper functions (load_hdf5, visualize, group_images, my_PreProc,
# data_consistency_check, extract_random_patches, extract_random) are assumed
# to come from the project's own helper modules.
def get_data_training(train_imgs_original,
                      train_groudTruth,
                      patch_height,
                      patch_width,
                      N_subimgs,
                      inside_FOV, dataset,
                      path_experiment, fcn=True):
    train_imgs_original = load_hdf5(train_imgs_original)
    train_masks = load_hdf5(train_groudTruth)  # ground-truth masks

    visualize(group_images(train_imgs_original[:, :, :, :], 5),
              path_experiment + 'imgs_train')  # check original imgs train

    # preprocessing
    train_imgs = my_PreProc(train_imgs_original)
    train_masks = train_masks / 255.
    if dataset == 'DRIVE':
        train_imgs = train_imgs[:, :, 9:574, :]  # cut bottom and top so the image is 565x565
        train_masks = train_masks[:, :, 9:574, :]
    elif dataset == 'STARE':
        train_imgs = train_imgs[:, :, :, 15:685]  # cut left and right columns
        train_masks = train_masks[:, :, :, 15:685]
    else:
        train_imgs = train_imgs[:, :, :, 19:979]  # cut left and right columns
        train_masks = train_masks[:, :, :, 19:979]
    data_consistency_check(train_imgs, train_masks)

    # check masks are within 0-1
    assert (np.min(train_masks) == 0 and np.max(train_masks) == 1)

    print("\ntrain images/masks shape:")
    print(train_imgs.shape)
    print("train images range (min-max): " + str(np.min(train_imgs)) + ' - ' + str(np.max(train_imgs)))
    print("train masks are within 0-1\n")

    # extract the TRAINING patches from the full images
    if fcn:
        patches_imgs_train, patches_masks_train = extract_random_patches(train_imgs_original, train_imgs, train_masks,
                                                                         patch_height, patch_width,
                                                                         N_subimgs,
                                                                         inside=inside_FOV)
        data_consistency_check(patches_imgs_train, patches_masks_train)
    else:
        patches_imgs_train, patches_masks_train = extract_random(train_imgs_original, train_imgs, train_masks,
                                                                 patch_height, patch_width,
                                                                 N_subimgs,
                                                                 inside=inside_FOV)
    # optional extra preprocessing of the patches (e.g. Fourier transform)
    # TODO: add Hessian/Frangi filtering
    # patches_imgs_train = my_PreProc_patches(patches_imgs_train)
    print("\ntrain PATCHES images/masks shape:")
    print(patches_imgs_train.shape)
    print("train PATCHES images range (min-max): " + str(np.min(patches_imgs_train)) + ' - ' + str(
        np.max(patches_imgs_train)))

    return patches_imgs_train, patches_masks_train  # , patches_imgs_test, patches_masks_test
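A minimal call sketch, assuming hypothetical HDF5 paths, an experiment directory, and typical patch settings; none of these values appear in the listing above.

# Sketch only: all paths and numeric values below are assumed placeholders.
patches_imgs_train, patches_masks_train = get_data_training(
    train_imgs_original='./data/train_imgs.hdf5',            # assumed path
    train_groudTruth='./data/train_groundTruth.hdf5',        # assumed path
    patch_height=48,
    patch_width=48,
    N_subimgs=190000,
    inside_FOV=False,
    dataset='DRIVE',
    path_experiment='./experiments/run1/',                   # assumed output directory
    fcn=True)

print(patches_imgs_train.shape)   # one patch per row, e.g. (N_subimgs, 1, 48, 48) in this channels-first layout
print(patches_masks_train.shape)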
Example #3
# pre_processing.py

import sys

# 'hf' is assumed to be the project's helper module providing parse_config
# and load_hdf5 (the actual module name is not shown in this snippet).

if len(sys.argv) > 1:
    config_file = sys.argv[1]
else:
    config_file = './configuration.txt'
# ========= CONFIG FILE TO READ FROM =======
config = hf.parse_config(config_file)

# ===========================================
# which dataset to run on
dataset = config['dataset']
# original test images

test_images_file = config['test_images_file']
test_images = hf.load_hdf5(test_images_file)
full_img_height = test_images.shape[2]
full_img_width = test_images.shape[3]

# the border (field-of-view) masks
test_masks_file = config['test_masks_file']
test_border_masks = hf.load_hdf5(test_masks_file)
# the ground-truth segmentations
test_gt_file = config['test_gt_file']

# dimension of the patches
patch_height = config['patch_height']
patch_width = config['patch_width']
# the stride used when overlapping patch predictions are averaged
stride_height = config['stride_height']
stride_width = config['stride_width']
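These configuration values would then feed a patch-extraction call. The continuation below is a sketch only: it reuses get_data_testing_overlap from Example #1, assumes parse_config already returns numeric patch and stride values, and derives the number of images to test from the loaded array, since no such config key appears above.

# Sketch only: wiring the config values into the Example #1 helper.
patches_imgs_test, new_height, new_width, test_masks = get_data_testing_overlap(
    test_imgs_original=test_images_file,
    test_groudTruth=test_gt_file,
    Imgs_to_test=test_images.shape[0],   # test every image found in the HDF5 file
    patch_height=patch_height,
    patch_width=patch_width,
    stride_height=stride_height,
    stride_width=stride_width)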