Example #1
def get_data_training(DRIVE_train_imgs_original, DRIVE_train_groudTruth,
                      DRIVE_test_imgs_original, DRIVE_test_groudTruth,
                      Imgs_to_test, patch_height, patch_width, N_subimgs,
                      inside_FOV):
    train_imgs_original = load_hdf5(DRIVE_train_imgs_original)
    train_masks = load_hdf5(DRIVE_train_groudTruth)  #masks always the same
    # visualize(group_images(train_imgs_original[0:20,:,:,:],5),'imgs_train')#.show()  #check original imgs train
    ### test
    test_imgs_original = load_hdf5(DRIVE_test_imgs_original)
    # visualize(group_images(test_imgs_original[0:20,:,:,:],5),'imgs_test')   #check original imgs test
    test_masks = load_hdf5(DRIVE_test_groudTruth)

    train_imgs = my_PreProc(train_imgs_original)
    test_imgs = my_PreProc(test_imgs_original)
    train_masks = train_masks / 255.
    test_masks = test_masks / 255.

    train_imgs = train_imgs[:, :, 9:574, :]  #cut bottom and top so now it is 565*565
    train_masks = train_masks[:, :, 9:574, :]  #cut bottom and top so now it is 565*565
    test_imgs = test_imgs[:Imgs_to_test, :, 9:574, :]  #cut bottom and top so now it is 565*565
    test_masks = test_masks[:Imgs_to_test, :, 9:574, :]  #cut bottom and top so now it is 565*565

    data_consistency_check(train_imgs, train_masks)
    data_consistency_check(test_imgs, test_masks)

    #check masks are within 0-1
    assert (np.max(train_masks) == 1 and np.max(test_masks) == 1)
    assert (np.min(train_masks) == 0 and np.min(test_masks) == 0)

    print "\ntrain/test images/masks shape:"
    print train_imgs.shape
    print "train images range (min-max): " + str(
        np.min(train_imgs)) + ' - ' + str(np.max(train_imgs))
    print "test images range (min-max): " + str(
        np.min(test_imgs)) + ' - ' + str(np.max(test_imgs))
    print "train/test masks are within 0-1\n"

    #extract the TRAINING patches from the full images
    patches_imgs_train, patches_masks_train = extract_random(
        train_imgs, train_masks, patch_height, patch_width, N_subimgs,
        inside_FOV)
    data_consistency_check(patches_imgs_train, patches_masks_train)
    #extract the TEST patches from the full images
    patches_imgs_test = extract_ordered(test_imgs, patch_height, patch_width)
    patches_masks_test = extract_ordered(test_masks, patch_height, patch_width)
    data_consistency_check(patches_imgs_test, patches_masks_test)

    print "\ntrain/test PATCHES images/masks shape:"
    print patches_imgs_train.shape
    print "train PATCHES images range (min-max): " + str(
        np.min(patches_imgs_train)) + ' - ' + str(np.max(patches_imgs_train))
    print "test PATCHES images range (min-max): " + str(
        np.min(patches_imgs_test)) + ' - ' + str(np.max(patches_imgs_test))

    return patches_imgs_train, patches_masks_train, patches_imgs_test, patches_masks_test
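Every example on this page leans on the same small set of helpers (load_hdf5, data_consistency_check, my_PreProc, ...) that the page itself never shows. A minimal sketch of the first two, inferred from how they are called here — the "image" dataset name and the NCHW layout are assumptions, not confirmed by the source:

import h5py
import numpy as np

def load_hdf5(infile):
    # The call sites treat each file as a single dataset; "image" is a guess.
    with h5py.File(infile, "r") as f:
        return f["image"][()]

def data_consistency_check(imgs, masks):
    # Images and masks must agree on count and spatial size (NCHW layout);
    # masks are single-channel, images 1- or 3-channel.
    assert len(imgs.shape) == len(masks.shape) == 4
    assert imgs.shape[0] == masks.shape[0]
    assert imgs.shape[2:] == masks.shape[2:]
    assert masks.shape[1] == 1
    assert imgs.shape[1] in (1, 3)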
def get_data_training(DRIVE_train_imgs_original, DRIVE_train_groudTruth,
                      patch_height, patch_width, N_subimgs, angle, inside_FOV):
    train_imgs_original = load_hdf5(DRIVE_train_imgs_original)
    train_masks = load_hdf5(DRIVE_train_groudTruth)  #masks always the same

    train_imgs = my_PreProc(train_imgs_original)

    train_masks = train_masks / 255.

    train_imgs = train_imgs[:, :, 9:574, :]  #cut bottom and top so now it is 565*565
    train_masks = train_masks[:, :, 9:574, :]  #cut bottom and top so now it is 565*565
    #train_imgs=train_imgs[np.newaxis,...]
    #train_masks=train_masks[np.newaxis,...]
    data_consistency_check(train_imgs, train_masks)

    #check masks are within 0-1
    assert (np.min(train_masks) == 0 and np.max(train_masks) == 1)

    print("\ntrain images/masks shape:")
    print(train_imgs.shape)
    print("train images range (min-max): " + str(np.min(train_imgs)) + ' - ' +
          str(np.max(train_imgs)))
    print("train masks are within 0-1\n")

    #extract the TRAINING patches from the full images
    patches_imgs_train, patches_masks_train = extract_random(
        train_imgs, train_masks, patch_height, patch_width, N_subimgs, angle,
        inside_FOV)
    return patches_imgs_train, patches_masks_train
Example #3
def get_data_training(DRIVE_train_imgs_original,
                      DRIVE_train_groudTruth,
                      patch_height,
                      patch_width,
                      N_subimgs,
                      inside_FOV):
    train_imgs_original = load_hdf5(DRIVE_train_imgs_original)
    train_masks = load_hdf5(DRIVE_train_groudTruth) #masks always the same
    # visualize(group_images(train_imgs_original[0:20,:,:,:],5),'imgs_train')#.show()  #check original imgs train


    train_imgs = my_PreProc(train_imgs_original)
    train_masks = train_masks/255.

    train_imgs = train_imgs[:,:,9:574,:]  #cut bottom and top so now it is 565*565
    train_masks = train_masks[:,:,9:574,:]  #cut bottom and top so now it is 565*565
    data_consistency_check(train_imgs,train_masks)

    #check masks are within 0-1
    assert(np.min(train_masks)==0 and np.max(train_masks)==1)

    print "\ntrain images/masks shape:"
    print train_imgs.shape
    print "train images range (min-max): " +str(np.min(train_imgs)) +' - '+str(np.max(train_imgs))
    print "train masks are within 0-1\n"

    #extract the TRAINING patches from the full images
    patches_imgs_train, patches_masks_train = extract_random(train_imgs,train_masks,patch_height,patch_width,N_subimgs,inside_FOV)
    data_consistency_check(patches_imgs_train, patches_masks_train)

    print "\ntrain PATCHES images/masks shape:"
    print patches_imgs_train.shape
    print "train PATCHES images range (min-max): " +str(np.min(patches_imgs_train)) +' - '+str(np.max(patches_imgs_train))

    return patches_imgs_train, patches_masks_train#, patches_imgs_test, patches_masks_test
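extract_random is the core training-patch sampler every variant above calls. A minimal sketch, assuming uniform patch centers, even patch sizes, and a circular field-of-view test like DRIVE's — the FOV radius rule and the even split across images are guesses, not the repo's actual code:

import numpy as np

def _center_inside_fov(x, y, h, w, patch_h):
    # Crude circular FOV test centered on the image; the real check may differ.
    radius = min(h, w) / 2.0 - patch_h / 2.0
    return (x - w / 2.0) ** 2 + (y - h / 2.0) ** 2 < radius ** 2

def extract_random(imgs, masks, patch_h, patch_w, n_patches, inside_fov=True):
    n, c, h, w = imgs.shape
    assert n_patches % n == 0  # sample the same number of patches per image
    per_img = n_patches // n
    p_imgs = np.empty((n_patches, c, patch_h, patch_w), dtype=imgs.dtype)
    p_masks = np.empty((n_patches, masks.shape[1], patch_h, patch_w), dtype=masks.dtype)
    k = 0
    for i in range(n):
        for _ in range(per_img):
            while True:
                yc = np.random.randint(patch_h // 2, h - patch_h // 2)
                xc = np.random.randint(patch_w // 2, w - patch_w // 2)
                if not inside_fov or _center_inside_fov(xc, yc, h, w, patch_h):
                    break
            ys, xs = yc - patch_h // 2, xc - patch_w // 2
            p_imgs[k] = imgs[i, :, ys:ys + patch_h, xs:xs + patch_w]
            p_masks[k] = masks[i, :, ys:ys + patch_h, xs:xs + patch_w]
            k += 1
    return p_imgs, p_masks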
Example #4
def preprocessing(data, color_channel):
    if color_channel == 1:
        data = my_PreProc(data)
    else:
        data = color_PreProc(data)
    data = data / 255
    return data
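my_PreProc (and the color_PreProc used above, presumably its RGB counterpart) is never shown on this page. In the retina-vessel pipelines these snippets come from it typically means grayscale conversion, dataset-wide standardization, CLAHE, and gamma correction, with output in 0-255 so callers can divide by 255. A hedged sketch under exactly those assumptions:

import cv2
import numpy as np

def my_PreProc(data):
    # Assumed input: NCHW RGB in 0-255; assumed output: single-channel, 0-255.
    assert data.shape[1] == 3
    gray = (0.299 * data[:, 0] + 0.587 * data[:, 1] + 0.114 * data[:, 2])[:, None]
    # standardize over the whole set, then rescale back to 0-255
    gray = (gray - np.mean(gray)) / (np.std(gray) + 1e-8)
    gray = 255.0 * (gray - gray.min()) / (gray.max() - gray.min())
    clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8, 8))
    gamma = 1.2  # assumed value
    lut = (255.0 * (np.arange(256) / 255.0) ** (1.0 / gamma)).astype(np.uint8)
    out = np.empty_like(gray)
    for i in range(gray.shape[0]):
        # CLAHE and LUTs operate on 2-D uint8 slices
        eq = clahe.apply(gray[i, 0].astype(np.uint8))
        out[i, 0] = cv2.LUT(eq, lut)
    return out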
Example #5
def get_data_predict_overlap(imgPredict, patch_height, patch_width,
                             stride_height, stride_width, num_lesion,
                             total_data):
    ### test
    img = imgPredict
    test_imgs_original = []
    test_imgs_original.append(img)
    test_imgs_original = np.asarray(test_imgs_original)
    test_imgs_original = np.transpose(test_imgs_original, (0, 3, 1, 2))
    test_imgs = my_PreProc(test_imgs_original)

    #extend both images and masks so they can be divided exactly by the patches dimensions
    #test_imgs = test_imgs[0:Imgs_to_test,:,:,:]
    #test_masks = test_masks[0:Imgs_to_test,:,:,:]
    test_imgs = paint_border_overlap(test_imgs, patch_height, patch_width,
                                     stride_height, stride_width)

    #check masks are within 0-1

    print("\ntest images shape:")
    print(test_imgs.shape)

    #extract the TEST patches from the full images
    patches_imgs_test = extract_ordered_overlap(test_imgs, patch_height,
                                                patch_width, stride_height,
                                                stride_width)

    print("\ntest PATCHES images shape:")
    print(patches_imgs_test.shape)
    print("test PATCHES images range (min-max): " +
          str(np.min(patches_imgs_test)) + ' - ' +
          str(np.max(patches_imgs_test)))

    return patches_imgs_test, test_imgs.shape[2], test_imgs.shape[3]
def get_data_testing_overlap(DRIVE_test_imgs_original, DRIVE_test_groudTruth, Imgs_to_test, patch_height, patch_width, stride_height, stride_width):
    ### test
    test_imgs_original = load_hdf5(DRIVE_test_imgs_original)
    test_masks = load_hdf5(DRIVE_test_groudTruth)

    test_imgs = my_PreProc(test_imgs_original)
    test_masks = test_masks/255.
    #extend both images and masks so they can be divided exactly by the patches dimensions
    test_imgs = test_imgs[0:Imgs_to_test,:,:,:]
    test_masks = test_masks[0:Imgs_to_test,:,:,:]
    test_imgs = paint_border_overlap(test_imgs, patch_height, patch_width, stride_height, stride_width)

    #check masks are within 0-1
    assert(np.max(test_masks)==1  and np.min(test_masks)==0)

    print("\ntest images shape:")
    print(test_imgs.shape)
    print("\ntest mask shape:")
    print(test_masks.shape)
    print("test images range (min-max): " +str(np.min(test_imgs)) +' - '+str(np.max(test_imgs)))
    print("test masks are within 0-1\n")

    #extract the TEST patches from the full images
    patches_imgs_test = extract_ordered_overlap(test_imgs,patch_height,patch_width,stride_height,stride_width)

    print("\ntest PATCHES images shape:")
    print(patches_imgs_test.shape)
    print("test PATCHES images range (min-max): " +str(np.min(patches_imgs_test)) +' - '+str(np.max(patches_imgs_test)))

    return patches_imgs_test, test_imgs.shape[2], test_imgs.shape[3], test_masks
Example #7
def get_data_testing_overlap(DRIVE_test_imgs_original, DRIVE_test_groudTruth, Imgs_to_test, patch_height, patch_width, stride_height, stride_width):
    ### test
    test_imgs_original = load_hdf5(DRIVE_test_imgs_original)
    test_masks = load_hdf5(DRIVE_test_groudTruth)

    test_imgs = my_PreProc(test_imgs_original)
    test_masks = test_masks/255.
    #extend both images and masks so they can be divided exactly by the patches dimensions
    test_imgs = test_imgs[0:Imgs_to_test,:,:,:]
    test_masks = test_masks[0:Imgs_to_test,:,:,:]
    test_imgs = paint_border_overlap(test_imgs, patch_height, patch_width, stride_height, stride_width)

    #check masks are within 0-1
    assert(np.max(test_masks)==1  and np.min(test_masks)==0)

    print "\ntest images shape:"
    print test_imgs.shape
    print "\ntest mask shape:"
    print test_masks.shape
    print "test images range (min-max): " +str(np.min(test_imgs)) +' - '+str(np.max(test_imgs))
    print "test masks are within 0-1\n"

    #extract the TEST patches from the full images
    patches_imgs_test = extract_ordered_overlap(test_imgs,patch_height,patch_width,stride_height,stride_width)

    print "\ntest PATCHES images shape:"
    print patches_imgs_test.shape
    print "test PATCHES images range (min-max): " +str(np.min(patches_imgs_test)) +' - '+str(np.max(patches_imgs_test))

    return patches_imgs_test, test_imgs.shape[2], test_imgs.shape[3], test_masks
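paint_border_overlap pads the images so that a sliding window of the given patch size and strides tiles them exactly; get_data_testing_overlap relies on this before extracting patches. A sketch consistent with the call sites — zero-padding on the bottom/right is an assumption:

import numpy as np

def paint_border_overlap(imgs, patch_h, patch_w, stride_h, stride_w):
    n, c, h, w = imgs.shape
    leftover_h = (h - patch_h) % stride_h
    leftover_w = (w - patch_w) % stride_w
    new_h = h if leftover_h == 0 else h + (stride_h - leftover_h)
    new_w = w if leftover_w == 0 else w + (stride_w - leftover_w)
    padded = np.zeros((n, c, new_h, new_w), dtype=imgs.dtype)
    padded[:, :, :h, :w] = imgs  # original content stays in the top-left corner
    return padded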
def get_data_training_add_rotation_more(imgrot, maskrot, patch_height,
                                        patch_width, N_subimgs, angle,
                                        inside_FOV):
    train_imgs_original = imgrot
    train_masks = maskrot
    train_imgs_original = train_imgs_original[np.newaxis, ...]
    train_masks = train_masks[np.newaxis, ...]
    ##visualize(group_images(train_imgs_original[0:20,:,:,:],5),'imgs_train')#.show()  #check original imgs train

    train_imgs = my_PreProc(train_imgs_original)
    #train_masks = train_masks / 255.
    print('mask_min_max_value:', np.min(train_masks), np.max(train_masks))

    train_imgs = train_imgs[:, :, 9:574, :]  # cut bottom and top so now it is 565*565
    train_masks = train_masks[:, :, 9:574, :]  # cut bottom and top so now it is 565*565
    # train_thin_masks=train_thin_masks[:,:,9:574,:]
    data_consistency_check(train_imgs, train_masks)

    # check masks are within 0-1
    assert (int(np.min(train_masks)) == 0 and int(np.max(train_masks)) == 1)

    print("\ntrain images/masks shape:")
    print(train_imgs.shape)
    print("train images range (min-max): " + str(np.min(train_imgs)) + ' - ' +
          str(np.max(train_imgs)))
    print("train masks are within 0-1\n")

    # extract the TRAINING patches from the full images
    patches_imgs_train, patches_masks_train = extract_random(
        train_imgs, train_masks, patch_height, patch_width, N_subimgs, angle,
        inside_FOV)
    return patches_imgs_train, patches_masks_train
Example #9
def get_data_testing_overlap(test_imgs, test_groudTruth, Imgs_to_test, patch_height, patch_width, stride_height, stride_width):
    test_imgs_original = load_hdf5(test_imgs)
    test_masks = load_hdf5(test_groudTruth)

    test_imgs = my_PreProc(test_imgs_original)
    test_masks = test_masks/255.
    test_imgs = test_imgs[0:Imgs_to_test,:,:,:]
    test_masks = test_masks[0:Imgs_to_test,:,:,:]
    test_imgs = paint_border_overlap(test_imgs, patch_height, patch_width, stride_height, stride_width)

    assert(np.max(test_masks)==1  and np.min(test_masks)==0)

    print ("\ntest images shape:")
    print (test_imgs.shape)
    print ("\ntest mask shape:")
    print (test_masks.shape)
    print ("test images range (min-max): " +str(np.min(test_imgs)) +' - '+str(np.max(test_imgs)))
    print ("test masks are within 0-1\n")

    #extract the TEST patches from the full images
    patches_imgs_test = extract_ordered_overlap(test_imgs,patch_height,patch_width,stride_height,stride_width)

    print ("\ntest PATCHES images shape:")
    print (patches_imgs_test.shape)
    print ("test PATCHES images range (min-max): " +str(np.min(patches_imgs_test)) +' - '+str(np.max(patches_imgs_test)))

    return patches_imgs_test, test_imgs.shape[2], test_imgs.shape[3], test_masks
Example #10
def get_data_training(train_imgs_original, train_groundTruth, patch_height,
                      patch_width, N_subimgs, inside_FOV, patches):

    # Load train images from hdf5 files from pre-processing
    train_imgs_original = load_hdf5(train_imgs_original)
    train_groundTruth = load_hdf5(train_groundTruth)  #masks always the same

    # Normalize images
    train_imgs = my_PreProc(train_imgs_original)
    train_groundTruth = train_groundTruth / 255.
    visualize(group_images(train_imgs[100:120, :, :, :], 5),
              'imgs_train')  #.show()  #check original imgs train

    # shuffle indices to shuffle data
    idx = np.random.permutation(train_imgs.shape[0])
    train_imgs = train_imgs[idx]
    train_groundTruth = train_groundTruth[idx]

    visualize(group_images(train_imgs[100:120, :, :, :], 5),
              'imgs_train_random')
    visualize(group_images(train_groundTruth[100:120, :, :, :], 5),
              'gTruths_train_random')

    #train_imgs = train_imgs[:,:,9:574,:]  #cut bottom and top so now it is 565*565
    #train_groundTruth = train_groundTruth[:,:,9:574,:]  #cut bottom and top so now it is 565*565
    # data_consistency_check(train_imgs,train_groundTruth)

    # Check masks are within 0-1
    assert (np.min(train_groundTruth) == 0 and np.max(train_groundTruth) == 1)

    print "train images shape:" + str(train_imgs.shape)
    print "train images range (min-max): " + str(
        np.min(train_imgs)) + ' - ' + str(np.max(train_imgs))
    print "train ground truths shape:" + str(train_groundTruth.shape)
    print "train ground truths range (min-max): " + str(
        np.min(train_groundTruth)) + ' - ' + str(np.max(train_groundTruth))

    if patches:
        # Extract the TRAINING patches from the full images
        patches_imgs_train, patches_groundTruths_train = extract_random(
            train_imgs, train_groundTruth, patch_height, patch_width,
            N_subimgs, inside_FOV)
        data_consistency_check(patches_imgs_train, patches_groundTruths_train)

        print "train PATCHES images shape: " + str(patches_imgs_train.shape)
        print "train PATCHES images range (min-max): " + str(
            np.min(patches_imgs_train)) + ' - ' + str(
                np.max(patches_imgs_train))
        print "train PATCHES ground truths shape: " + str(
            patches_groundTruths_train.shape)
        print "train PATCHES ground truths range (min-max): " + str(
            np.min(patches_groundTruths_train)) + ' - ' + str(
                np.max(patches_groundTruths_train))

        # visualize(group_images(patches_imgs_train[100:120,:,:,:],5),'imgs_train_patches')
        # visualize(group_images(patches_groundTruths_train[100:120,:,:,:],5),'gTruth_train_patches')

        return patches_imgs_train, patches_groundTruths_train
    else:
        return train_imgs, train_groundTruth
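A hypothetical call for this variant; the file names and patch counts below are placeholders, not values from the original project:

# patches=True returns randomly sampled patches; patches=False returns the
# full preprocessed images and ground truths instead.
patches_imgs, patches_gts = get_data_training(
    'train_imgs.hdf5', 'train_gts.hdf5',
    patch_height=48, patch_width=48,
    N_subimgs=190000, inside_FOV=False, patches=True)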
Example #11
def get_data_testing(DRIVE_test_imgs_original, DRIVE_test_groudTruth,
                     DRIVE_test_border, batch_h, batch_w):
    ### test
    test_imgs_original = load_hdf5(DRIVE_test_imgs_original)
    test_masks = load_hdf5(DRIVE_test_groudTruth)

    test_imgs = my_PreProc(test_imgs_original)
    test_masks = test_masks / 255.

    heigth = test_imgs.shape[2]
    width = test_imgs.shape[3]

    subWidth = width % batch_w
    subheigth = heigth % batch_h

    subWidth2 = int(subWidth / 2)
    subheigth2 = int(subheigth / 2)

    test_imgs = test_imgs[:, :, subheigth2:heigth - subheigth + subheigth2,
                          subWidth2:width - subWidth + subWidth2]
    test_masks = test_masks[:, :, subheigth2:heigth - subheigth + subheigth2,
                            subWidth2:width - subWidth + subWidth2]
    if DRIVE_test_border != "":
        test_borders = load_hdf5(DRIVE_test_border)
        test_borders = test_borders[:, :,
                                    subheigth2:heigth - subheigth + subheigth2,
                                    subWidth2:width - subWidth + subWidth2]
        return test_imgs, test_masks, test_borders
    else:
        return test_imgs, test_masks
Example #12
def get_data_training(DRIVE_train_imgs_original, DRIVE_train_groudTruth,
                      batch_h, batch_w):
    train_imgs_original = load_hdf5(DRIVE_train_imgs_original)
    print("train_imgs_original.shape = ", train_imgs_original.shape)
    train_masks = load_hdf5(DRIVE_train_groudTruth)  #masks always the same
    print("train_masks.shape = ", train_masks.shape)

    train_imgs = my_PreProc(train_imgs_original)
    train_masks = train_masks / 255.

    heigth = train_imgs.shape[2]
    width = train_imgs.shape[3]

    subWidth = width % batch_w
    subheigth = heigth % batch_h

    print(subheigth, "*", subWidth, "pixels will be cropped.")

    subWidth2 = int(subWidth / 2)
    subheigth2 = int(subheigth / 2)
    print(subheigth2, "*", subWidth2, "in the top-left of the image.")

    train_imgs = train_imgs[:, :, subheigth2:heigth - subheigth + subheigth2,
                            subWidth2:width - subWidth + subWidth2]
    train_masks = train_masks[:, :, subheigth2:heigth - subheigth + subheigth2,
                              subWidth2:width - subWidth + subWidth2]

    print("cropped train_imgs_original.shape = ", train_imgs_original.shape)
    print("cropped train_masks.shape = ", train_masks.shape)

    return train_imgs, train_masks
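The cropping above keeps the centered largest region whose sides are multiples of the batch size. A worked example of the arithmetic, assuming DRIVE-sized inputs (584 x 565) and 32 x 32 batches — both values are assumptions for illustration:

h, w, batch_h, batch_w = 584, 565, 32, 32
sub_h, sub_w = h % batch_h, w % batch_w    # 8, 21 pixels to remove
sub_h2, sub_w2 = sub_h // 2, sub_w // 2    # 4, 10 removed at the top-left
# rows kept: 4..580 -> 576 = 18*32; cols kept: 10..554 -> 544 = 17*32
print(h - sub_h, w - sub_w)                # 576 544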
Example #13
def get_data_training(DRIVE_train_imgs_original,
                      DRIVE_train_groudTruth,
                      patch_height,
                      patch_width,
                      N_subimgs):
    train_imgs_original = load_hdf5(DRIVE_train_imgs_original)
    train_masks = load_hdf5(DRIVE_train_groudTruth) 

    train_imgs = my_PreProc(train_imgs_original)
    train_masks = train_masks/255

    data_consistency_check(train_imgs,train_masks)

    #check masks are within 0-1
    assert(np.min(train_masks)==0 and np.max(train_masks)==1)

    print ("\ntrain images/masks shape:")
    print (train_imgs.shape)
    print ("train images range (min-max): " +str(np.min(train_imgs)) +' - '+str(np.max(train_imgs)))
    print ("train masks are within 0-1\n")

    #extract the TRAINING patches from the full images
    patches_imgs_train, patches_masks_train = extract_random(train_imgs,train_masks,patch_height,patch_width,N_subimgs)
    data_consistency_check(patches_imgs_train, patches_masks_train)

    print ("\ntrain PATCHES images/masks shape:")
    print (patches_imgs_train.shape)
    print ("train PATCHES images range (min-max): " +str(np.min(patches_imgs_train)) +' - '+str(np.max(patches_imgs_train)))

    return patches_imgs_train, patches_masks_train#, patches_imgs_test, patches_masks_test
Example #14
def get_data_training(DRIVE_train_imgs_original, DRIVE_train_groudTruth,
                      patch_height, patch_width, N_subimgs, inside_FOV,
                      num_lesion, total_data):

    train_imgs_original = load_hdf5(
        DRIVE_train_imgs_original)  #[img_id:img_id+1]
    train_masks = np.zeros([
        total_data, 1, train_imgs_original.shape[2],
        train_imgs_original.shape[3]
    ])

    train_masks_temp = load_hdf5(
        DRIVE_train_groudTruth +
        '.hdf5')  #[img_id:img_id+1]#masks always the same
    train_masks[:, 0, :, :] = train_masks_temp[:, 0, :, :]
    print("mask:", train_masks_temp.shape)
    print(train_masks[:, 0, :, :].shape)

    print(train_imgs_original[:, 0, :, :].shape)
    train_imgs = my_PreProc(train_imgs_original)

    print(train_imgs[:, 0, :, :].shape)
    train_masks = train_masks / 255.

    train_imgs = train_imgs[:, :, 7:429, :]  #cut bottom and top so now it is 422*422
    train_masks = train_masks[:, :, 7:429, :]  #cut bottom and top so now it is 422*422
    data_consistency_check(train_imgs, train_masks)

    #check masks are within 0-1
    assert (np.min(train_masks) == 0 and np.max(train_masks) == 1)

    print("\ntrain images shape:")
    print(train_imgs.shape)
    print("\ntrain masks shape:")
    print(train_masks.shape)
    print("train images 0 range (min-max): " + str(np.min(train_imgs[:, 0])) +
          ' - ' + str(np.max(train_imgs[:, 0])))
    # print "train images 1 range (min-max): " +str(np.min(train_imgs[:,1])) +' - '+str(np.max(train_imgs[:,1]))
    #print "train images 2 range (min-max): " +str(np.min(train_imgs[:,2])) +' - '+str(np.max(train_imgs[:,2]))
    print("train masks are within 0-1\n")

    #extract the TRAINING patches from the full images
    patches_imgs_train, patches_masks_train = extract_random(
        train_imgs, train_masks, patch_height, patch_width, N_subimgs,
        inside_FOV)
    data_consistency_check(patches_imgs_train, patches_masks_train)

    print("\ntrain PATCHES images/masks shape:")
    print(patches_imgs_train.shape)
    print(patches_masks_train.shape)
    print("train PATCHES images range (min-max): " +
          str(np.min(patches_imgs_train)) + ' - ' +
          str(np.max(patches_imgs_train)))

    return patches_imgs_train, patches_masks_train  #, patches_imgs_test, patches_masks_test
Example #15
def split_to_patches(samples,
                     labels,
                     patch_height=48,
                     patch_width=48):
    samples = my_PreProc(samples)
    labels = labels / 255.0  # avoid in-place division, which fails on integer arrays
    patches_imgs_test = extract_ordered(samples, patch_height, patch_width)
    patches_masks_test = extract_ordered(labels, patch_height, patch_width)
    return patches_imgs_test, patches_masks_test
def get_data_random_training(path_dir, sample_size, image_type):
    train_imgs_original, train_masks = load_random_rgb(path_dir, sample_size,train_tag, image_type)
    train_imgs = my_PreProc(train_imgs_original)
    print "\ntrain images shape:"
    print train_imgs.shape
    print "train images range (min-max): " + str(np.min(train_imgs)) + ' - ' + str(np.max(train_imgs))
    print "\ntrain masks shape:"
    print train_masks.shape
    return train_imgs, train_masks
def get_data_training(
        DRIVE_train_imgs_original,  # path to the training images
        DRIVE_train_groudTruth,  # path to the ground-truth images
        patch_height,
        patch_width,
        N_subimgs,
        inside_FOV):
    train_imgs_original = load_hdf5(DRIVE_train_imgs_original)
    train_masks = load_hdf5(
        DRIVE_train_groudTruth
    )  # masks always the same; in this code "mask" means the ground truth
    # visualize(group_images(train_imgs_original[0:20,:,:,:],5),'imgs_train')#.show()  # check original imgs train
    # my_PreProc(): standardization, adaptive histogram equalization,
    # LUT-based histogram stretching, then rescaling to the 0-1 range
    train_imgs = my_PreProc(train_imgs_original)
    train_masks = train_masks / 255.
    # the ground truth is also mapped to 0-1: white is 255 and black is 0,
    # so after /255 the mask holds only the two values 0 and 1

    if dataset == 'DRIVE':
        train_imgs = train_imgs[:, :, 9:574, :]  # cut bottom and top so now it is 565*565
        train_masks = train_masks[:, :, 9:574, :]  # cut bottom and top so now it is 565*565
    elif dataset == 'STARE':
        train_imgs = train_imgs[:, :, :, 15:685]
        train_masks = train_masks[:, :, :, 15:685]
    elif dataset == 'CHASE':
        train_imgs = train_imgs[:, :, :, 19:979]
        train_masks = train_masks[:, :, :, 19:979]
    elif dataset == 'HRF':
        train_imgs = train_imgs[:, :, :, 19:979]
        train_masks = train_masks[:, :, :, 19:979]

    data_consistency_check(train_imgs, train_masks)

    # check masks are within 0-1
    assert (np.min(train_masks) == 0 and np.max(train_masks) == 1)

    print("\ntrain images/masks shape:")
    print(train_imgs.shape)
    print("train images range (min-max): " + str(np.min(train_imgs)) + ' - ' +
          str(np.max(train_imgs)))
    print("train masks are within 0-1\n")

    # extract the TRAINING patches from the full images
    patches_imgs_train, patches_masks_train = extract_random(
        train_imgs, train_masks, patch_height, patch_width, N_subimgs,
        inside_FOV)
    data_consistency_check(patches_imgs_train, patches_masks_train)

    print("\ntrain PATCHES images/masks shape:")
    print(patches_imgs_train.shape)
    print("train PATCHES images range (min-max): " +
          str(np.min(patches_imgs_train)) + ' - ' +
          str(np.max(patches_imgs_train)))

    return patches_imgs_train, patches_masks_train  # patches_imgs_test, patches_masks_test
def get_data_training(path_dir, train_full_img, train_tag, image_type):
    train_imgs_original = load_rgb(path_dir + train_full_img, image_type)
    train_masks = to_categorical(load_y_lable(path_dir + train_tag)['primary'], 2)
    train_imgs = my_PreProc(train_imgs_original)
    print "\ntrain images shape:"
    print train_imgs.shape
    print "train images range (min-max): " + str(np.min(train_imgs)) + ' - ' + str(np.max(train_imgs))
    print "\ntrain masks shape:"
    print train_masks.shape
    return train_imgs, train_masks  
Example #19
def get_data_training(train_imgs, train_groudTruth, patch_height, patch_width, N_subimgs, inside_FOV):
    train_imgs_original = load_hdf5(train_imgs)
    train_masks = load_hdf5(train_groudTruth) 
    train_imgs = my_PreProc(train_imgs_original)
    train_masks = train_masks/255.

    data_consistency_check(train_imgs,train_masks)
    patches_imgs_train, patches_masks_train = extract_random(train_imgs,train_masks,patch_height,patch_width,N_subimgs,inside_FOV)
    data_consistency_check(patches_imgs_train, patches_masks_train)

    return patches_imgs_train, patches_masks_train
def get_data_training(DRIVE_train_imgs_original, DRIVE_train_groudTruth, patch_height, patch_width, N_subimgs, inside_FOV):
    train_imgs_original = load_hdf5(DRIVE_train_imgs_original)
    train_masks = load_hdf5(DRIVE_train_groudTruth) #masks always the same

    train_imgs = my_PreProc(train_imgs_original)
    train_masks = train_masks/255.

    train_imgs = train_imgs[:,:,9:574,:]  #cut bottom and top so now it is 565*565
    train_masks = train_masks[:,:,9:574,:]  #cut bottom and top so now it is 565*565
    data_consistency_check(train_imgs,train_masks)

    #check masks are within 0-1
    assert(np.min(train_masks)==0 and np.max(train_masks)==1)

    #extract the TRAINING patches from the full images
    patches_imgs_train, patches_masks_train = extract_random(train_imgs,train_masks,patch_height,patch_width,N_subimgs,inside_FOV)
    data_consistency_check(patches_imgs_train, patches_masks_train)

    return patches_imgs_train, patches_masks_train
def get_data_training(DRIVE_train_imgs_original, DRIVE_train_groudTruth,
                      DRIVE_train_bordermasks, patch_height, patch_width,
                      N_subimgs, inside_FOV):
    train_imgs_original = load_hdf5(DRIVE_train_imgs_original)
    train_masks = load_hdf5(DRIVE_train_groudTruth)  #masks always the same
    train_bordermasks = load_hdf5(
        DRIVE_train_bordermasks)  #bordermasks always the same

    train_imgs = my_PreProc(train_imgs_original)
    train_masks = train_masks / 255.
    if not fine_tuning:

        train_imgs = train_imgs[:, :, 9:574, :]  #cut bottom and top so now it is 565*565
        train_masks = train_masks[:, :, 9:574, :]  #cut bottom and top so now it is 565*565
        data_consistency_check(train_imgs, train_masks)
    else:
        train_imgs = train_imgs[:, :, :, :]  #keep the full image size for fine-tuning
        train_masks = train_masks[:, :, :, :]
        train_bordermasks = train_bordermasks[:, :, :, :]
        data_consistency_check(train_imgs, train_masks)

    #check masks are within 0-1
    assert (np.min(train_masks) == 0 and np.max(train_masks) == 1)
    print("\ntrain images/masks shape:")
    print(train_imgs.shape)
    print("train images range (min-max): " + str(np.min(train_imgs)) + ' - ' +
          str(np.max(train_imgs)))
    print("train masks are within 0-1\n")

    #extract the TRAINING patches from the full images
    if not fine_tuning:
        patches_imgs_train, patches_masks_train = extract_random(
            train_imgs, train_masks, patch_height, patch_width, N_subimgs,
            inside_FOV)
        data_consistency_check(patches_imgs_train, patches_masks_train)
    else:
        patches_imgs_train, patches_masks_train = extract_random_ft(
            train_imgs, train_masks, train_bordermasks, patch_height,
            patch_width, N_subimgs, inside_FOV)
        data_consistency_check(patches_imgs_train, patches_masks_train)

    print("\ntrain PATCHES images/masks shape:")
    print(patches_imgs_train.shape)
    print("train PATCHES images range (min-max): " +
          str(np.min(patches_imgs_train)) + ' - ' +
          str(np.max(patches_imgs_train)))

    return patches_imgs_train, patches_masks_train  #, patches_imgs_test, patches_masks_test
def get_data_testing_overlap(DRIVE_test_imgs_original, DRIVE_test_groudTruth, Imgs_to_test, patch_height, patch_width, stride_height, stride_width):
    ### test
    test_imgs_original = load_hdf5(DRIVE_test_imgs_original)
    test_masks = load_hdf5(DRIVE_test_groudTruth)

    test_imgs = my_PreProc(test_imgs_original)
    test_masks = test_masks/255.
    #extend both images and masks so they can be divided exactly by the patches dimensions
    test_imgs = test_imgs[0:Imgs_to_test,:,:,:]
    test_masks = test_masks[0:Imgs_to_test,:,:,:]
    test_imgs = paint_border_overlap(test_imgs, patch_height, patch_width, stride_height, stride_width)

    #check masks are within 0-1
    assert(np.max(test_masks)==1  and np.min(test_masks)==0)

    #extract the TEST patches from the full images
    patches_imgs_test = extract_ordered_overlap(test_imgs,patch_height,patch_width,stride_height,stride_width)

    return patches_imgs_test, test_imgs.shape[2], test_imgs.shape[3], test_masks
Example #23
def process_img(i, filename, imgs_dir, groundTruth_dir, add_noise):
    #original
    img = np.asarray(Image.open(imgs_dir + filename).convert('L'))
    g_truth = np.asarray(Image.open(groundTruth_dir + filename).convert('L'))

    if add_noise:
        half_shape = (np.asarray(img.shape) / 2.).astype(int)  # np.int is removed in modern NumPy
        # cv2.resize expects dsize as (width, height)
        noise = cv2.resize(np.random.normal(scale=2, size=half_shape),
                           dsize=(img.shape[1], img.shape[0]))
        img = np.clip(img + noise, 0, 255).astype(np.uint8)

    img = np.reshape(img, (1, 1, img.shape[0], img.shape[1]))
    g_truth = np.reshape(g_truth, (1, 1, g_truth.shape[0], g_truth.shape[1]))

    # test imgs
    assert (np.max(img) <= 255)
    assert (np.min(img) >= 0)
    assert (img.shape == (1, 1, height, width))
    # test g_truths
    assert (np.max(g_truth) <= 255)
    assert (np.min(g_truth) >= 0)
    assert (g_truth.shape == (1, 1, height, width))

    # extract patches
    img = my_PreProc(img)
    img_data, new_image_size, n_subimgs = extract_ordered_overlap(
        img, patch_size, stride_size)
    img_data = np.transpose(img_data, (0, 2, 3, 1)).astype(np.uint8)
    mean_img = np.mean(img_data)
    var_img = np.var(img_data)
    # preprocess img
    gt_data, _, _ = extract_ordered_overlap(g_truth, patch_size, stride_size)
    gt_data = np.transpose(gt_data, (0, 2, 3, 1)).astype(np.uint8)
    mean_gt = np.mean(gt_data)
    var_gt = np.var(gt_data)

    payload = []
    for j in range(img_data.shape[0]):
        encoded_img_string = cv2.imencode('.png', img_data[j])[1].tobytes()
        encoded_gt_string = cv2.imencode('.png', gt_data[j])[1].tobytes()
        feature = {
            'image': _bytes_feature(tf.compat.as_bytes(encoded_img_string)),
            'label': _bytes_feature(tf.compat.as_bytes(encoded_gt_string)),
        }
        tf_example = tf.train.Example(features=tf.train.Features(
            feature=feature))
        payload.append(tf_example.SerializeToString())

    return mean_img, var_img, mean_gt, var_gt, new_image_size, payload, i
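process_img returns the serialized tf.train.Example records as payload rather than writing them itself, so a driver has to collect and write them. A hypothetical driver — the function name, paths, and filename list are placeholders, not part of the original script:

import tensorflow as tf

def write_shard(filenames, imgs_dir, gt_dir, out_path, add_noise=False):
    # Collect the serialized examples from every image and write one shard.
    with tf.io.TFRecordWriter(out_path) as writer:
        for i, name in enumerate(filenames):
            (_, _, _, _, _, payload, _) = process_img(i, name, imgs_dir,
                                                      gt_dir, add_noise)
            for serialized in payload:
                writer.write(serialized)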
def get_data_training_rotate(train_imgs_original,
                             train_groudTruth,
                             patch_height,
                             patch_width,
                             N_subimgs,
                             inside_FOV,
                             dataset='DRIVE'):
    train_imgs_original = load_hdf5(train_imgs_original)
    train_masks = load_hdf5(train_groudTruth) # masks always the same
    # visualize(group_images(train_imgs_original[0:20,:,:,:],5),'imgs_train')#.show()  #check original imgs train

    train_imgs = my_PreProc(train_imgs_original)
    train_masks = train_masks/255.

    if dataset == 'DRIVE':
        train_imgs = train_imgs[:, :, 9:574, :]  # cut bottom and top so now it is 565*565
        train_masks = train_masks[:, :, 9:574, :]  # cut bottom and top so now it is 565*565
    elif dataset == 'CHASE':
        train_imgs = train_imgs[:, :, :, 19:979]  # cut left and right so the width is 960
        train_masks = train_masks[:, :, :, 19:979]  # cut left and right so the width is 960
    print("train_imgs shape:", train_imgs.shape)
    data_consistency_check(train_imgs, train_masks)

    # check masks are within 0-1
    assert(np.min(train_masks)==0 and np.max(train_masks)==1)

    print("\ntrain images/masks shape:")
    print(train_imgs.shape)
    print("train images range (min-max): " + str(np.min(train_imgs)) + ' - ' + str(np.max(train_imgs)))
    print("train masks are within 0-1\n")

    # extract the TRAINING patches from the full images
    patches_imgs_train, patches_masks_train = extract_random_rotate(train_imgs, train_masks, patch_height, patch_width,
                                                                    N_subimgs, inside_FOV)
    # random shuffle
    index = list(range(N_subimgs))
    random.shuffle(index)
    patches_imgs_train = patches_imgs_train[index]
    patches_masks_train = patches_masks_train[index]
    print("Random Shuffled!")

    data_consistency_check(patches_imgs_train, patches_masks_train)

    print("\ntrain PATCHES images/masks shape:")
    print(patches_imgs_train.shape)
    print("train PATCHES images range (min-max): " + str(np.min(patches_imgs_train)) + ' - ' + str(np.max(patches_imgs_train)))

    return patches_imgs_train, patches_masks_train  # , patches_imgs_test, patches_masks_test
Example #25
def get_data_training_overlap(DRIVE_train_imgs_original,
                      DRIVE_train_groudTruth,
                      DRIVE_train_borders,
                      patch_height,
                      patch_width,
                      N_subimgs,
                      inside_FOV):
    train_imgs_original = load_hdf5(DRIVE_train_imgs_original)
    train_masks = load_hdf5(DRIVE_train_groudTruth) #masks always the same
    train_borders = load_hdf5(DRIVE_train_borders)
    # visualize(group_images(train_imgs_original[0:20,:,:,:],5),'imgs_train')#.show()  #check original imgs train


    train_imgs = my_PreProc(train_imgs_original)  # applies contrast and gamma correction
    train_masks = train_masks/255.
    train_borders = train_borders/255.
    
    ######## This is to debug visualization
    
    #print(np.shape(train_borders))
    #tb=np.reshape(train_borders[0,0,:,:],(512,512))
    #plt.imshow(tb)
    ######################### end debug initialization

    #train_imgs = train_imgs[:,:,9:574,:]  #cut bottom and top so now it is 565*565
    #train_masks = train_masks[:,:,9:574,:]  #cut bottom and top so now it is 565*565
    data_consistency_check(train_imgs,train_masks,train_borders)

    #check masks are within 0-1
    print(np.min(train_masks),np.max(train_masks))
    assert(np.min(train_masks)==0 and np.max(train_masks)==1)
    assert(np.min(train_borders)==0 and np.max(train_borders)==1)

    print("\ntrain images/masks shape:")
    print(train_imgs.shape)
    print("train images range (min-max): " +str(np.min(train_imgs)) +' - '+str(np.max(train_imgs)))
    print("train masks are within 0-1\n")

    #extract the TRAINING patches from the full images
    patches_imgs_train, patches_masks_train, patches_borders_train = extract_ordered_overlap_train(train_imgs,train_masks,train_borders,patch_height,patch_width)
    data_consistency_check(patches_imgs_train, patches_masks_train,patches_borders_train)

    print("\ntrain PATCHES images/masks shape:")
    print(patches_imgs_train.shape)
    print("train PATCHES images range (min-max): " +str(np.min(patches_imgs_train)) +' - '+str(np.max(patches_imgs_train)))

    return patches_imgs_train, patches_masks_train#, patches_imgs_test, patches_masks_test
Example #26
def get_data_testing(DRIVE_test_imgs_original, DRIVE_test_groudTruth,
                     Imgs_to_test, patch_height, patch_width, num_lesion,
                     total_data):
    ### test
    test_imgs_original = load_hdf5(DRIVE_test_imgs_original)
    test_masks = np.zeros([
        total_data, 1, test_imgs_original.shape[2], test_imgs_original.shape[3]
    ])

    test_masks_temp = load_hdf5(
        DRIVE_test_groudTruth +
        '.hdf5')  #[img_id:img_id+1]#masks always the same
    test_masks[:, 0, :, :] = test_masks_temp[:, 0, :, :]

    test_imgs = my_PreProc(test_imgs_original)
    test_masks = test_masks / 255.

    #extend both images and masks so they can be divided exactly by the patches dimensions
    test_imgs = test_imgs[0:Imgs_to_test, :, :, :]
    test_masks = test_masks[0:Imgs_to_test, :, :, :]
    test_imgs = paint_border(test_imgs, patch_height, patch_width)
    test_masks = paint_border(test_masks, patch_height, patch_width)

    data_consistency_check(test_imgs, test_masks)

    #check masks are within 0-1
    assert (np.max(test_masks) == 1 and np.min(test_masks) == 0)

    print("\ntest images/masks shape:")
    print(test_imgs.shape)
    print("test images range (min-max): " + str(np.min(test_imgs)) + ' - ' +
          str(np.max(test_imgs)))
    print("test masks are within 0-1\n")

    #extract the TEST patches from the full images
    patches_imgs_test = extract_ordered(test_imgs, patch_height, patch_width)
    patches_masks_test = extract_ordered(test_masks, patch_height, patch_width)
    data_consistency_check(patches_imgs_test, patches_masks_test)

    print("\ntest PATCHES images/masks shape:")
    print(patches_imgs_test.shape)
    print("test PATCHES images range (min-max): " +
          str(np.min(patches_imgs_test)) + ' - ' +
          str(np.max(patches_imgs_test)))

    return patches_imgs_test, patches_masks_test
Example #27
def get_data_testing(test_imgs_original, test_groudTruth, Imgs_to_test,
                     patch_height, patch_width):

    # Load test images from hdf5 files from pre-processing
    test_imgs_original = load_hdf5(test_imgs_original)
    test_groundTruths = load_hdf5(test_groudTruth)

    # Normalize test ground truths
    test_imgs = my_PreProc(test_imgs_original)
    test_groundTruths = test_groundTruths / 255.

    #extend both images and masks so they can be divided exactly by the patches dimensions
    test_imgs = test_imgs[0:Imgs_to_test, :, :, :]
    test_groundTruths = test_groundTruths[0:Imgs_to_test, :, :, :]
    test_imgs = paint_border(test_imgs, patch_height, patch_width)
    test_groundTruths = paint_border(test_groundTruths, patch_height,
                                     patch_width)

    data_consistency_check(test_imgs, test_groundTruths)

    # Check masks are within 0-1
    assert (np.max(test_groundTruths) == 1 and np.min(test_groundTruths) == 0)

    print "train images shape:" + str(test_imgs.shape)
    print "train images range (min-max): " + str(
        np.min(test_imgs)) + ' - ' + str(np.max(test_imgs))
    print "train ground truths shape:" + str(test_groundTruths.shape)
    print "train ground truths range (min-max): " + str(
        np.min(test_groundTruths)) + ' - ' + str(np.max(test_groundTruths))

    #extract the TEST patches from the full images
    patches_imgs_test = extract_ordered(test_imgs, patch_height, patch_width)
    patches_groundTruths_test = extract_ordered(test_groundTruths,
                                                patch_height, patch_width)
    data_consistency_check(patches_imgs_test, patches_groundTruths_test)

    print "test PATCHES images shape: " + patches_imgs_test.shape
    print "test PATCHES images range (min-max): " + str(
        np.min(patches_imgs_test)) + ' - ' + str(np.max(patches_imgs_test))
    print "test PATCHES ground truths shape: " + patches_groundTruths_test.shape
    print "test PATCHES ground truths range (min-max): " + str(
        np.min(patches_groundTruths_test)) + ' - ' + str(
            np.max(patches_groundTruths_test))

    return patches_imgs_test, patches_groundTruths_test
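The non-overlap variants above rely on paint_border and extract_ordered. A minimal sketch consistent with their call sites — zero-padding and row-major patch order are assumptions:

import numpy as np

def paint_border(imgs, patch_h, patch_w):
    # Zero-pad bottom/right up to the next multiple of the patch size.
    n, c, h, w = imgs.shape
    new_h = ((h + patch_h - 1) // patch_h) * patch_h
    new_w = ((w + patch_w - 1) // patch_w) * patch_w
    out = np.zeros((n, c, new_h, new_w), dtype=imgs.dtype)
    out[:, :, :h, :w] = imgs
    return out

def extract_ordered(imgs, patch_h, patch_w):
    # Non-overlapping tiling; assumes paint_border was applied first.
    n, c, h, w = imgs.shape
    assert h % patch_h == 0 and w % patch_w == 0
    tiles = imgs.reshape(n, c, h // patch_h, patch_h, w // patch_w, patch_w)
    return tiles.transpose(0, 2, 4, 1, 3, 5).reshape(-1, c, patch_h, patch_w)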
Example #28
def get_data_training(
        DRIVE_train_imgs_original,  # path to the training images
        DRIVE_train_groudTruth,  # path to the ground-truth images
        patch_height,
        patch_width,
        N_subimgs,
        inside_FOV):
    train_imgs_original = load_hdf5(DRIVE_train_imgs_original)
    train_masks = load_hdf5(DRIVE_train_groudTruth)  #masks always the same
    # visualize(group_images(train_imgs_original[0:20,:,:,:],5),'imgs_train')#.show()  #check original imgs train

    train_imgs = my_PreProc(train_imgs_original)  # image preprocessing: normalization etc.
    train_masks = train_masks / 255.

    train_imgs = train_imgs[:, :, 9:574, :]  # cut bottom and top so now it is 565*565
    train_masks = train_masks[:, :, 9:574, :]  # cut bottom and top so now it is 565*565
    data_consistency_check(train_imgs, train_masks)  # consistency check between training images and ground truths

    #check masks are within 0-1
    assert (np.min(train_masks) == 0
            and np.max(train_masks) == 1)  # two classes in the ground truth: 0 and 1

    print("\ntrain images/masks shape:")
    print(train_imgs.shape)
    print("train images range (min-max): " + str(np.min(train_imgs)) + ' - ' +
          str(np.max(train_imgs)))
    print("train masks are within 0-1\n")

    #extract the TRAINING patches from the full images
    # randomly extract training patches from the full images
    patches_imgs_train, patches_masks_train = extract_random(
        train_imgs, train_masks, patch_height, patch_width, N_subimgs,
        inside_FOV)
    data_consistency_check(patches_imgs_train,
                           patches_masks_train)  # consistency check between image patches and ground-truth patches

    print("\ntrain PATCHES images/masks shape:")
    print(patches_imgs_train.shape)
    print("train PATCHES images range (min-max): " +
          str(np.min(patches_imgs_train)) + ' - ' +
          str(np.max(patches_imgs_train)))

    return patches_imgs_train, patches_masks_train  #, patches_imgs_test, patches_masks_test
Example #29
def get_data_testing(test_imgs, test_groudTruth, Imgs_to_test, patch_height, patch_width):
    test_imgs_original = load_hdf5(test_imgs)
    test_masks = load_hdf5(test_groudTruth)
    test_imgs = my_PreProc(test_imgs_original)
    test_masks = test_masks/255.


    test_imgs = test_imgs[0:Imgs_to_test,:,:,:]
    test_masks = test_masks[0:Imgs_to_test,:,:,:]
    test_imgs = paint_border(test_imgs,patch_height,patch_width)
    test_masks = paint_border(test_masks,patch_height,patch_width)
    data_consistency_check(test_imgs, test_masks)
   
    patches_imgs_test = extract_ordered(test_imgs,patch_height,patch_width)
    patches_masks_test = extract_ordered(test_masks,patch_height,patch_width)
    data_consistency_check(patches_imgs_test, patches_masks_test)

    return patches_imgs_test, patches_masks_test
Example #30
    def pre_process(self, imgs_original, masks):

        test_imgs = my_PreProc(imgs_original)

        # Pad images so they can be divided exactly by the patches dimensions
        test_imgs = paint_border_overlap(test_imgs, self.patch_x, self.patch_y,
                                         self.stride_x, self.stride_y)
        test_masks = paint_border_overlap(masks, self.patch_x, self.patch_y,
                                          self.stride_x, self.stride_y)

        # Extract patches from the full images
        patches_imgs_test = extract_ordered_overlap(test_imgs, self.patch_x,
                                                    self.patch_y,
                                                    self.stride_x,
                                                    self.stride_y)

        return patches_imgs_test, test_imgs.shape[2], test_imgs.shape[
            3], test_masks
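A sketch of the extract_ordered_overlap used here, consistent with its call sites — it presumes paint_border_overlap already ran, and row-major patch order per image is an assumption (note the variant in Example #23 has a different signature and return values):

import numpy as np

def extract_ordered_overlap(imgs, patch_h, patch_w, stride_h, stride_w):
    n, c, h, w = imgs.shape
    assert (h - patch_h) % stride_h == 0 and (w - patch_w) % stride_w == 0
    n_h = (h - patch_h) // stride_h + 1
    n_w = (w - patch_w) // stride_w + 1
    patches = np.empty((n * n_h * n_w, c, patch_h, patch_w), dtype=imgs.dtype)
    k = 0
    for i in range(n):
        for y in range(n_h):
            for x in range(n_w):
                patches[k] = imgs[i, :,
                                  y * stride_h:y * stride_h + patch_h,
                                  x * stride_w:x * stride_w + patch_w]
                k += 1
    return patches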
Example #31
def get_data_training(DRIVE_train_imgs_original, DRIVE_train_groudTruth,
                      patch_height, patch_width, N_subimgs, inside_FOV):
    train_imgs_original = load_hdf5(DRIVE_train_imgs_original)
    train_masks = load_hdf5(DRIVE_train_groudTruth)  #masks always the same
    # visualize(group_images(train_imgs_original[0:20,:,:,:],5),'imgs_train')#.show()  #check original imgs train

    train_imgs = my_PreProc(train_imgs_original)
    train_masks = train_masks / 255.

    #    train_imgs = train_imgs[:,:,2:992,2:992]  #cut bottom and top so now it is 990*990
    #    train_masks = train_masks[:,:,2:992,2:992]  #cut bottom and top so now it is 990*990
    #    train_imgs = train_imgs[:,:,9:574,:]  #cut bottom and top so now it is 565*565
    #    train_masks = train_masks[:,:,9:574,:]  #cut bottom and top so now it is 565*565
    train_imgs = train_imgs[:, :, 47:605, :]  #cut bottom and top so the height is 558
    train_masks = train_masks[:, :, 47:605, :]  #cut bottom and top so the height is 558
    data_consistency_check(train_imgs, train_masks)

    #check masks are within 0-1
    assert (np.min(train_masks) == 0 and np.max(train_masks) == 1)

    print("\ntrain images/masks shape:")
    print(train_imgs.shape)
    print("train images range (min-max): " + str(np.min(train_imgs)) + ' - ' +
          str(np.max(train_imgs)))
    print("train masks are within 0-1\n")

    #extract the TRAINING patches from the full images
    patches_imgs_train, patches_masks_train = extract_random(
        train_imgs, train_masks, patch_height, patch_width, N_subimgs,
        inside_FOV)
    data_consistency_check(patches_imgs_train, patches_masks_train)

    print("\ntrain PATCHES images/masks shape:")
    print(patches_imgs_train.shape)
    print("train PATCHES images range (min-max): " +
          str(np.min(patches_imgs_train)) + ' - ' +
          str(np.max(patches_imgs_train)))

    return patches_imgs_train, patches_masks_train  #, patches_imgs_test, patches_masks_test
def get_data_testing(DRIVE_test_imgs_original, DRIVE_test_groudTruth,
                     Imgs_to_test, patch_height, patch_width):
    # test
    test_imgs_original = load_hdf5(DRIVE_test_imgs_original)
    test_masks = load_hdf5(DRIVE_test_groudTruth)

    test_imgs = my_PreProc(test_imgs_original)
    test_masks = test_masks / 255.

    # extend both images and masks so they can be divided exactly by the patches dimensions
    '''
    To get a whole number of patches, test_imgs must be zero-padded via paint_border().
    '''
    test_imgs = test_imgs[0:Imgs_to_test, :, :, :]  # Imgs_to_test = 20
    test_masks = test_masks[0:Imgs_to_test, :, :, :]
    test_imgs = paint_border(test_imgs, patch_height, patch_width)
    test_masks = paint_border(test_masks, patch_height, patch_width)

    data_consistency_check(test_imgs, test_masks)

    # check masks are within 0-1
    assert (np.max(test_masks) == 1 and np.min(test_masks) == 0)

    print("\ntest images/masks shape:")
    print(test_imgs.shape)
    print("test images range (min-max): " + str(np.min(test_imgs)) + ' - ' +
          str(np.max(test_imgs)))
    print("test masks are within 0-1\n")

    # extract the TEST patches from the full images
    patches_imgs_test = extract_ordered(test_imgs, patch_height, patch_width)
    patches_masks_test = extract_ordered(test_masks, patch_height, patch_width)
    data_consistency_check(patches_imgs_test, patches_masks_test)

    print("\ntest PATCHES images/masks shape:")
    print(patches_imgs_test.shape)
    print("test PATCHES images range (min-max): " +
          str(np.min(patches_imgs_test)) + ' - ' +
          str(np.max(patches_imgs_test)))

    return patches_imgs_test, patches_masks_test
Example #33
def predict_challenge(challenge_folder, challenge_predicted_folder, plot=False):
    challenge_list = ISIC.list_from_folder(challenge_folder)
    challenge_resized_folder = challenge_folder + "_{}_{}".format(height, width)

    if not os.path.exists(challenge_resized_folder):
        print "Creating resized challenge images for prediction"
        ISIC.resize_images(challenge_list,
                           input_image_folder=challenge_folder,
                           input_mask_folder=None,
                           output_image_folder=challenge_resized_folder,
                           output_mask_folder=None,
                           height=height, width=width)

    challenge_images = ISIC.load_images(challenge_list, height, width, challenge_resized_folder)

    if pre_proc:
        challenge_images = my_PreProc(challenge_images)
    challenge_images = challenge_images - train_mean

    model_name = model_filename.split('.')[1].split('/')[2]
    try:
        if test_aug:
            mask_pred_challenge = pkl.load(open(os.path.join(challenge_predicted_folder, model_name + '_testaug.pkl'), 'rb'))
        else:
            mask_pred_challenge = pkl.load(open(os.path.join(challenge_predicted_folder, model_name + '.pkl'), 'rb'))
    except Exception:
        model.load_weights(model_filename)
        if test_aug:
            print "Predicting using test data augmentation"
            mask_pred_challenge = np.array([my_predict(model, x) for x in tqdm(challenge_images)])
            with open(os.path.join(challenge_predicted_folder, model_name + '_testaug.pkl'), 'wb') as f:
                pkl.dump(mask_pred_challenge, f)
        else:
            print "Predicting"
            mask_pred_challenge = model.predict(challenge_images, batch_size=batch_size)
            mask_pred_challenge = mask_pred_challenge[:, 0, :, :] # remove channel dimension
            with open(os.path.join(challenge_predicted_folder, model_name + '.pkl'), 'wb') as f:
                pkl.dump(mask_pred_challenge, f)
    mask_pred_challenge = np.where(mask_pred_challenge>=0.5, 1, 0)
    mask_pred_challenge = mask_pred_challenge * 255
    mask_pred_challenge = mask_pred_challenge.astype(np.uint8)

    if not test_aug:
        challenge_predicted_folder = os.path.join(challenge_predicted_folder, model_name)
    else:
        challenge_predicted_folder = os.path.join(challenge_predicted_folder, model_name + '_testaug')
    if not os.path.exists(challenge_predicted_folder):
        os.makedirs(challenge_predicted_folder)

    print "Start predicting masks of original shapes"
    imgs = []
    mask_preds = []
    for i in trange(len(challenge_list)):
        img, mask_pred = ISIC.show_images_full_sized(challenge_list,
                                                     img_mask_pred_array=mask_pred_challenge,
                                                     image_folder=challenge_folder,
                                                     mask_folder=None,
                                                     index=i,
                                                     output_folder=challenge_predicted_folder,
                                                     plot=plot)
        #imgs.append(img)
        #mask_preds.append(mask_pred)
    return imgs, mask_preds
Example #34
                               output_mask_folder=mask_folder_ph2,
                               height=height,
                               width=width)
    print "Loading images"
    filenames = [x + '.jpg' for x in image_names]
    train_ph2, train_mask_ph2 = ISIC.load_images(filenames, height, width, image_folder_ph2, mask_folder_ph2)
    print "Done loading images"
    train = np.concatenate([train, train_ph2], axis=0)
    train_mask = np.concatenate([train_mask, train_mask_ph2], axis=0)

print(train.shape, train_mask.shape)
print(val.shape, val_mask.shape)

# preprocessing using histogram equalization
if pre_proc:
    train = my_PreProc(train)
    val = my_PreProc(val)

# remove mean of training data
train_mean = np.mean(train, axis=(0, 2, 3), keepdims=True)[0]
print "train mean is", train_mean.reshape(3)
train = train - train_mean
val = val - train_mean

def model_naming(model_name, size, loss, pre_proc, use_archive, use_ph2):
    model_filename = "./weights2018/{}_{}_{}".format(model_name, size, loss_param)
    if pre_proc:
        model_filename += '_preproc'
    if use_archive:
        model_filename += '_archive'
    if use_ph2:
Example #35
predictions = model.predict(patches_imgs_test, batch_size=32, verbose=2)
print "predicted images size :"
print predictions.shape

#===== Convert the prediction arrays in corresponding images
pred_patches = pred_to_imgs(predictions,"original")



#========== Elaborate and visualize the predicted images ====================
pred_imgs = None
orig_imgs = None
gtruth_masks = None
if average_mode:
    pred_imgs = recompone_overlap(pred_patches, new_height, new_width, stride_height, stride_width)# predictions
    orig_imgs = my_PreProc(test_imgs_orig[0:pred_imgs.shape[0],:,:,:])    #originals
    gtruth_masks = masks_test  #ground truth masks
else:
    pred_imgs = recompone(pred_patches,13,12)       # predictions
    orig_imgs = recompone(patches_imgs_test,13,12)  # originals
    gtruth_masks = recompone(patches_masks_test,13,12)  #masks
# apply the DRIVE masks on the predictions: set everything outside the FOV to zero!!
kill_border(pred_imgs, test_border_masks)  #DRIVE MASK  #only for visualization
## back to original dimensions
orig_imgs = orig_imgs[:,:,0:full_img_height,0:full_img_width]
pred_imgs = pred_imgs[:,:,0:full_img_height,0:full_img_width]
gtruth_masks = gtruth_masks[:,:,0:full_img_height,0:full_img_width]
print "Orig imgs shape: " +str(orig_imgs.shape)
print "pred imgs shape: " +str(pred_imgs.shape)
print "Gtruth imgs shape: " +str(gtruth_masks.shape)
visualize(group_images(orig_imgs,N_visual),path_experiment+"all_originals")#.show()
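recompone_overlap undoes extract_ordered_overlap: each predicted patch is pasted back at its window position, sums are accumulated, and every pixel is divided by how many patches covered it, so overlapping predictions are averaged. A minimal sketch under those assumptions (not the repo's exact code):

import numpy as np

def recompone_overlap(preds, img_h, img_w, stride_h, stride_w):
    n_patches, c, patch_h, patch_w = preds.shape
    n_h = (img_h - patch_h) // stride_h + 1
    n_w = (img_w - patch_w) // stride_w + 1
    assert n_patches % (n_h * n_w) == 0
    n_imgs = n_patches // (n_h * n_w)
    acc = np.zeros((n_imgs, c, img_h, img_w))
    cnt = np.zeros((n_imgs, c, img_h, img_w))
    k = 0
    for i in range(n_imgs):
        for y in range(n_h):
            for x in range(n_w):
                ys, xs = y * stride_h, x * stride_w
                acc[i, :, ys:ys + patch_h, xs:xs + patch_w] += preds[k]
                cnt[i, :, ys:ys + patch_h, xs:xs + patch_w] += 1
                k += 1
    assert np.min(cnt) >= 1  # every pixel covered at least once
    return acc / cnt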