コード例 #1
0
def get_data_training(train_imgs_original, train_groundTruth, patch_height,
                      patch_width, N_subimgs, inside_FOV, patches):
    """Load and preprocess training data, optionally extracting random patches.

    Parameters:
        train_imgs_original: path to the HDF5 file of training images.
        train_groundTruth: path to the HDF5 file of ground-truth masks.
        patch_height, patch_width: dimensions of the patches to extract.
        N_subimgs: number of random patches to extract.
        inside_FOV: whether patches must lie inside the field of view.
        patches: if truthy, return extracted patches; otherwise the full
            (shuffled) images and ground truths.

    Returns:
        (patches_imgs_train, patches_groundTruths_train) when `patches` is
        truthy, else (train_imgs, train_groundTruth).
    """
    # Load train images from hdf5 files from pre-processing
    train_imgs_original = load_hdf5(train_imgs_original)
    train_groundTruth = load_hdf5(train_groundTruth)  #masks always the same

    # Normalize images
    train_imgs = my_PreProc(train_imgs_original)
    train_groundTruth = train_groundTruth / 255.
    # NOTE(review): the [100:120] slices assume at least 120 images — confirm.
    visualize(group_images(train_imgs[100:120, :, :, :], 5),
              'imgs_train')  #.show()  #check original imgs train

    # shuffle indices to shuffle data
    idx = np.random.permutation(train_imgs.shape[0])
    train_imgs = train_imgs[idx]
    train_groundTruth = train_groundTruth[idx]

    visualize(group_images(train_imgs[100:120, :, :, :], 5),
              'imgs_train_random')
    visualize(group_images(train_groundTruth[100:120, :, :, :], 5),
              'gTruths_train_random')

    # Check masks are within 0-1 (binary after the /255 scaling)
    assert (np.min(train_groundTruth) == 0 and np.max(train_groundTruth) == 1)

    # BUGFIX: Python 2 print statements are syntax errors under Python 3
    # (the rest of this file uses print() calls); output is unchanged.
    print("train images shape:" + str(train_imgs.shape))
    print("train images range (min-max): " + str(np.min(train_imgs)) + ' - ' +
          str(np.max(train_imgs)))
    print("train ground truths shape:" + str(train_groundTruth.shape))
    print("train ground truths range (min-max): " +
          str(np.min(train_groundTruth)) + ' - ' +
          str(np.max(train_groundTruth)))

    if patches:  # idiom: truthiness instead of "== True"
        # Extract the TRAINING patches from the full images
        patches_imgs_train, patches_groundTruths_train = extract_random(
            train_imgs, train_groundTruth, patch_height, patch_width,
            N_subimgs, inside_FOV)
        data_consistency_check(patches_imgs_train, patches_groundTruths_train)

        print("train PATCHES images shape: " + str(patches_imgs_train.shape))
        print("train PATCHES images range (min-max): " +
              str(np.min(patches_imgs_train)) + ' - ' +
              str(np.max(patches_imgs_train)))
        print("train PATCHES ground truths shape: " +
              str(patches_groundTruths_train.shape))
        print("train PATCHES ground truths range (min-max): " +
              str(np.min(patches_groundTruths_train)) + ' - ' +
              str(np.max(patches_groundTruths_train)))

        return patches_imgs_train, patches_groundTruths_train
    else:
        return train_imgs, train_groundTruth
コード例 #2
0
def get_data_testing(DRIVE_test_imgs_original, DRIVE_test_groudTruth,
                     DRIVE_test_border, batch_h, batch_w):
    """Load test images/masks and center-crop them so both spatial
    dimensions are exact multiples of (batch_h, batch_w).

    Returns (imgs, masks) or (imgs, masks, borders) if a border file is given.
    """
    imgs = my_PreProc(load_hdf5(DRIVE_test_imgs_original))
    masks = load_hdf5(DRIVE_test_groudTruth) / 255.

    full_h = imgs.shape[2]
    full_w = imgs.shape[3]

    # Pixels that do not fit an exact multiple of the batch dimensions.
    extra_h = full_h % batch_h
    extra_w = full_w % batch_w
    top = int(extra_h / 2)
    left = int(extra_w / 2)

    # Shared crop windows (centered, biased toward top-left for odd extras).
    rows = slice(top, full_h - extra_h + top)
    cols = slice(left, full_w - extra_w + left)

    imgs = imgs[:, :, rows, cols]
    masks = masks[:, :, rows, cols]

    if DRIVE_test_border != "":
        borders = load_hdf5(DRIVE_test_border)[:, :, rows, cols]
        return imgs, masks, borders
    return imgs, masks
コード例 #3
0
def get_data_testing(test_images_file, test_gt_file, n_test_images,
                     patch_height, patch_width, channel, config):
    """Load test images/ground truth, pad them to a patch multiple, and cut
    ordered patches.

    Returns:
        (patches_imgs_test, padded height, padded width, test_gt,
         np.min(test_gt) + 1).
    """
    test_imgs_original = hf.load_hdf5(test_images_file)
    test_gt = hf.load_hdf5(test_gt_file)

    # extend both images and masks so they can be divided exactly by the patches dimensions
    # BUGFIX: the original sliced `test_imgs` before it was ever assigned
    # (NameError); the loaded array is `test_imgs_original`.
    test_imgs = test_imgs_original[0:n_test_images, :, :, :]
    test_gt = test_gt[0:n_test_images, :, :, :]
    test_imgs = paint_border(test_imgs, patch_height, patch_width, channel)
    test_gt = paint_border(test_gt, patch_height, patch_width, channel)
    data_consistency_check(test_imgs, test_gt)

    # check masks are within 0-1
    assert np.min(test_gt) == 0
    print('-----------------------------------------------------------')
    print("test images/masks shape:", test_imgs.shape)
    print("test images range (min-max):{}-{} ".format(np.min(test_imgs),
                                                      np.max(test_imgs)))
    print("test masks are within 0-1\n")
    # extract the TEST patches from the full images
    patches_imgs_test = extract_ordered(test_imgs, patch_height, patch_width)
    print("test PATCHES images/masks shape:", patches_imgs_test.shape)
    print("test PATCHES images range (min-max): {}-{} ".format(
        np.min(patches_imgs_test), np.max(patches_imgs_test)))

    print('-----------------------------------------------------------')
    return patches_imgs_test, test_imgs.shape[2], test_imgs.shape[
        3], test_gt, np.min(test_gt) + 1
コード例 #4
0
def get_data_training(DRIVE_train_imgs_original,
                      DRIVE_train_groudTruth,
                      patch_height,
                      patch_width,
                      N_subimgs):
    """Load the training set and return N_subimgs random (image, mask) patches."""
    raw_imgs = load_hdf5(DRIVE_train_imgs_original)
    gt_masks = load_hdf5(DRIVE_train_groudTruth)

    proc_imgs = my_PreProc(raw_imgs)
    gt_masks = gt_masks / 255

    data_consistency_check(proc_imgs, gt_masks)

    # Masks must be binary after the /255 scaling.
    assert np.min(gt_masks) == 0 and np.max(gt_masks) == 1

    print("\ntrain images/masks shape:")
    print(proc_imgs.shape)
    print("train images range (min-max): " + str(np.min(proc_imgs)) + ' - ' + str(np.max(proc_imgs)))
    print("train masks are within 0-1\n")

    # Draw the random training patches from the full images.
    img_patches, mask_patches = extract_random(proc_imgs, gt_masks,
                                               patch_height, patch_width,
                                               N_subimgs)
    data_consistency_check(img_patches, mask_patches)

    print("\ntrain PATCHES images/masks shape:")
    print(img_patches.shape)
    print("train PATCHES images range (min-max): " + str(np.min(img_patches)) + ' - ' + str(np.max(img_patches)))

    return img_patches, mask_patches
コード例 #5
0
def get_data_training(DRIVE_train_imgs_original,
                      DRIVE_train_groudTruth,
                      patch_height,
                      patch_width,
                      N_subimgs,
                      inside_FOV):
    """Load DRIVE training data, crop to 565x565, and extract random patches.

    Returns (patches_imgs_train, patches_masks_train).
    """
    train_imgs_original = load_hdf5(DRIVE_train_imgs_original)
    train_masks = load_hdf5(DRIVE_train_groudTruth) #masks always the same

    train_imgs = my_PreProc(train_imgs_original)
    train_masks = train_masks/255.

    train_imgs = train_imgs[:,:,9:574,:]  #cut bottom and top so now it is 565*565
    train_masks = train_masks[:,:,9:574,:]  #cut bottom and top so now it is 565*565
    data_consistency_check(train_imgs,train_masks)

    #check masks are within 0-1
    assert(np.min(train_masks)==0 and np.max(train_masks)==1)

    # BUGFIX: Python 2 print statements are syntax errors under Python 3
    # (the rest of this file uses print() calls); output is unchanged.
    print("\ntrain images/masks shape:")
    print(train_imgs.shape)
    print("train images range (min-max): " +str(np.min(train_imgs)) +' - '+str(np.max(train_imgs)))
    print("train masks are within 0-1\n")

    #extract the TRAINING patches from the full images
    patches_imgs_train, patches_masks_train = extract_random(train_imgs,train_masks,patch_height,patch_width,N_subimgs,inside_FOV)
    data_consistency_check(patches_imgs_train, patches_masks_train)

    print("\ntrain PATCHES images/masks shape:")
    print(patches_imgs_train.shape)
    print("train PATCHES images range (min-max): " +str(np.min(patches_imgs_train)) +' - '+str(np.max(patches_imgs_train)))

    return patches_imgs_train, patches_masks_train
コード例 #6
0
def get_data_training(DRIVE_train_imgs_original,
                      DRIVE_train_groudTruth,
                      patch_height,
                      patch_width,
                      N_subimgs,
                      inside_FOV):
    """Prepare DRIVE training patches: preprocess, crop to 565x565, and
    sample N_subimgs random patches."""
    originals = load_hdf5(DRIVE_train_imgs_original)
    masks = load_hdf5(DRIVE_train_groudTruth)  # masks always the same

    imgs = my_PreProc(originals)
    masks = masks / 255.

    # Drop top/bottom rows so the height matches the 565-pixel width.
    imgs = imgs[:, :, 9:574, :]
    masks = masks[:, :, 9:574, :]
    data_consistency_check(imgs, masks)

    # Ground truth must be binary after the /255 scaling.
    assert np.min(masks) == 0 and np.max(masks) == 1

    print("\ntrain images/masks shape:")
    print(imgs.shape)
    print("train images range (min-max): " + str(np.min(imgs)) + ' - ' + str(np.max(imgs)))
    print("train masks are within 0-1\n")

    # Sample the random training patches from the full images.
    patch_imgs, patch_masks = extract_random(imgs, masks, patch_height,
                                             patch_width, N_subimgs,
                                             inside_FOV)
    data_consistency_check(patch_imgs, patch_masks)

    print("\ntrain PATCHES images/masks shape:")
    print(patch_imgs.shape)
    print("train PATCHES images range (min-max): " + str(np.min(patch_imgs)) + ' - ' + str(np.max(patch_imgs)))

    return patch_imgs, patch_masks
コード例 #7
0
def get_data_testing_overlap(DRIVE_test_imgs_original, DRIVE_test_groudTruth, Imgs_to_test, patch_height, patch_width, stride_height, stride_width):
    """Prepare overlapping test patches; also return the padded spatial
    dimensions and the (unpadded) masks."""
    imgs = my_PreProc(load_hdf5(DRIVE_test_imgs_original))
    masks = load_hdf5(DRIVE_test_groudTruth) / 255.

    # Keep only the requested images, then pad so the sliding window
    # covers the full extent exactly.
    imgs = imgs[0:Imgs_to_test, :, :, :]
    masks = masks[0:Imgs_to_test, :, :, :]
    imgs = paint_border_overlap(imgs, patch_height, patch_width, stride_height, stride_width)

    # Masks must be binary after the /255 scaling.
    assert np.max(masks) == 1 and np.min(masks) == 0

    print("\ntest images shape:")
    print(imgs.shape)
    print("\ntest mask shape:")
    print(masks.shape)
    print("test images range (min-max): " + str(np.min(imgs)) + ' - ' + str(np.max(imgs)))
    print("test masks are within 0-1\n")

    # Slice the padded images into overlapping patches.
    test_patches = extract_ordered_overlap(imgs, patch_height, patch_width, stride_height, stride_width)

    print("\ntest PATCHES images shape:")
    print(test_patches.shape)
    print("test PATCHES images range (min-max): " + str(np.min(test_patches)) + ' - ' + str(np.max(test_patches)))

    return test_patches, imgs.shape[2], imgs.shape[3], masks
def get_data_training(DRIVE_train_imgs_original, DRIVE_train_groudTruth,
                      patch_height, patch_width, N_subimgs, angle, inside_FOV):
    """Load DRIVE training data, crop to 565x565, and extract random
    (rotated-by-`angle`) patches.

    Returns whatever extract_random yields for the cropped data —
    (patches, masks) in the sibling loaders.
    """
    train_imgs_original = load_hdf5(DRIVE_train_imgs_original)
    train_masks = load_hdf5(DRIVE_train_groudTruth)  #masks always the same

    train_imgs = my_PreProc(train_imgs_original)

    train_masks = train_masks / 255.

    #cut bottom and top so now it is 565*565
    train_imgs = train_imgs[:, :, 9:574, :]
    train_masks = train_masks[:, :, 9:574, :]
    data_consistency_check(train_imgs, train_masks)

    #check masks are within 0-1
    assert (np.min(train_masks) == 0 and np.max(train_masks) == 1)

    print("\ntrain images/masks shape:")
    print(train_imgs.shape)
    print("train images range (min-max): " + str(np.min(train_imgs)) + ' - ' +
          str(np.max(train_imgs)))
    print("train masks are within 0-1\n")

    #extract the TRAINING patches from the full images
    # BUGFIX: the original called extract_random but discarded the result and
    # implicitly returned None; return the patches like the sibling loaders do.
    return extract_random(train_imgs, train_masks, patch_height, patch_width,
                          N_subimgs, angle, inside_FOV)
コード例 #9
0
def get_data_training_color_seg(DRIVE_train_imgs_original,
                      DRIVE_train_groudTruth,
                      patch_height,
                      patch_width,
                      N_subimgs):
    """Prepare color-segmentation training data: scale images/masks to 0-1
    and extract random image patches.

    Returns (patches_imgs_train, train_masks) — note the masks are returned
    whole, not as patches.
    """
    train_imgs_original = load_hdf5(DRIVE_train_imgs_original)
    train_masks = load_hdf5(DRIVE_train_groudTruth) #masks always the same

    train_imgs = train_imgs_original/255.  # color images: plain scaling, no my_PreProc
    train_masks = train_masks/255.

    # BUGFIX: Python 2 print statements are syntax errors under Python 3
    # (the rest of this file uses print() calls); output is unchanged.
    print("\ntrain images/masks shape:")
    print(train_imgs.shape)
    print("train images range (min-max): " +str(np.min(train_imgs)) +' - '+str(np.max(train_imgs)))

    #extract the TRAINING patches from the full images
    patches_imgs_train = extract_random(train_imgs, patch_height, patch_width, N_subimgs)

    print("\ntrain PATCHES images/masks shape:")
    print(patches_imgs_train.shape)
    print("train PATCHES images range (min-max): " +str(np.min(patches_imgs_train)) +' - '+str(np.max(patches_imgs_train)))

    return patches_imgs_train, train_masks
コード例 #10
0
def get_data_training(DRIVE_train_imgs_original, DRIVE_train_groudTruth,
                      batch_h, batch_w):
    """Load training images/masks and center-crop them so both spatial
    dimensions are exact multiples of (batch_h, batch_w).

    Returns (train_imgs, train_masks) cropped to the same window.
    """
    train_imgs_original = load_hdf5(DRIVE_train_imgs_original)
    print("train_imgs_original.shape = ", train_imgs_original.shape)
    train_masks = load_hdf5(DRIVE_train_groudTruth)  #masks always the same
    print("train_masks.shape = ", train_masks.shape)

    train_imgs = my_PreProc(train_imgs_original)
    train_masks = train_masks / 255.

    heigth = train_imgs.shape[2]
    width = train_imgs.shape[3]

    # Pixels that do not fit an exact multiple of the batch dimensions.
    subWidth = width % batch_w
    subheigth = heigth % batch_h

    print(subheigth, "*", subWidth, "pixels will be cropped.")

    subWidth2 = int(subWidth / 2)
    subheigth2 = int(subheigth / 2)
    print(subheigth2, "*", subWidth2, "in the top-left of the image.")

    train_imgs = train_imgs[:, :, subheigth2:heigth - subheigth + subheigth2,
                            subWidth2:width - subWidth + subWidth2]
    train_masks = train_masks[:, :, subheigth2:heigth - subheigth + subheigth2,
                              subWidth2:width - subWidth + subWidth2]

    # BUGFIX: the original printed the *uncropped* train_imgs_original here,
    # which never reflects the crop just applied.
    print("cropped train_imgs.shape = ", train_imgs.shape)
    print("cropped train_masks.shape = ", train_masks.shape)

    return train_imgs, train_masks
コード例 #11
0
def get_data_testing_overlap(DRIVE_test_imgs_original, DRIVE_test_groudTruth, Imgs_to_test, patch_height, patch_width, stride_height, stride_width):
    """Prepare overlapping test patches; also return the padded spatial
    dimensions and the (unpadded) masks.

    Returns (patches_imgs_test, padded height, padded width, test_masks).
    """
    test_imgs_original = load_hdf5(DRIVE_test_imgs_original)
    test_masks = load_hdf5(DRIVE_test_groudTruth)

    test_imgs = my_PreProc(test_imgs_original)
    test_masks = test_masks/255.
    #extend both images and masks so they can be divided exactly by the patches dimensions
    test_imgs = test_imgs[0:Imgs_to_test,:,:,:]
    test_masks = test_masks[0:Imgs_to_test,:,:,:]
    test_imgs = paint_border_overlap(test_imgs, patch_height, patch_width, stride_height, stride_width)

    #check masks are within 0-1
    assert(np.max(test_masks)==1  and np.min(test_masks)==0)

    # BUGFIX: Python 2 print statements are syntax errors under Python 3
    # (the rest of this file uses print() calls); output is unchanged.
    print("\ntest images shape:")
    print(test_imgs.shape)
    print("\ntest mask shape:")
    print(test_masks.shape)
    print("test images range (min-max): " +str(np.min(test_imgs)) +' - '+str(np.max(test_imgs)))
    print("test masks are within 0-1\n")

    #extract the TEST patches from the full images
    patches_imgs_test = extract_ordered_overlap(test_imgs,patch_height,patch_width,stride_height,stride_width)

    print("\ntest PATCHES images shape:")
    print(patches_imgs_test.shape)
    print("test PATCHES images range (min-max): " +str(np.min(patches_imgs_test)) +' - '+str(np.max(patches_imgs_test)))

    return patches_imgs_test, test_imgs.shape[2], test_imgs.shape[3], test_masks
コード例 #12
0
def get_data_training(DRIVE_train_imgs_original, DRIVE_train_groudTruth,
                      patch_height, patch_width, N_subimgs, inside_FOV):
    """Load training images/masks (no preprocessing) and return N_subimgs
    random patch pairs."""
    imgs = load_hdf5(DRIVE_train_imgs_original)
    masks = load_hdf5(DRIVE_train_groudTruth) / 255.
    data_consistency_check(imgs, masks)

    # Masks must be binary after the /255 scaling.
    assert np.min(masks) == 0 and np.max(masks) == 1

    print("\ntrain images/masks shape:")
    print(imgs.shape)
    print("train images range (min-max): " + str(np.min(imgs)) + ' - ' + str(np.max(imgs)))
    print("train masks are within 0-1\n")

    patch_imgs, patch_masks = extract_random(imgs, masks, patch_height,
                                             patch_width, N_subimgs,
                                             inside_FOV)
    data_consistency_check(patch_imgs, patch_masks)

    print("\ntrain PATCHES images/masks shape:")
    print(patch_imgs.shape)
    print("train PATCHES images range (min-max): " + str(np.min(patch_imgs)) + ' - ' + str(np.max(patch_imgs)))

    return patch_imgs, patch_masks
コード例 #13
0
def get_data_testing(DRIVE_test_imgs_original, DRIVE_test_groudTruth,
                     Imgs_to_test, patch_height, patch_width):
    """Load test images/masks, pad both to a patch multiple, and cut ordered
    patches.

    Returns (patches_imgs_test, patches_masks_test).
    """
    test_imgs = load_hdf5(DRIVE_test_imgs_original)
    test_masks = load_hdf5(DRIVE_test_groudTruth)

    test_masks = test_masks / 255.

    test_imgs = test_imgs[0:Imgs_to_test, :, :, :]
    test_masks = test_masks[0:Imgs_to_test, :, :, :]
    test_imgs = paint_border(test_imgs, patch_height, patch_width)
    test_masks = paint_border(test_masks, patch_height, patch_width)

    data_consistency_check(test_imgs, test_masks)

    # Masks must be binary after the /255 scaling.
    assert (np.max(test_masks) == 1 and np.min(test_masks) == 0)

    # BUGFIX: the original was `print("..." ())`, which calls the string
    # object and raises TypeError at runtime.
    print("\ntest images/masks shape:")
    print(test_imgs.shape)
    print("test images range (min-max): " + str(np.min(test_imgs)) + ' - ' +
          str(np.max(test_imgs)))
    print("test masks are within 0-1\n")

    patches_imgs_test = extract_ordered(test_imgs, patch_height, patch_width)
    patches_masks_test = extract_ordered(test_masks, patch_height, patch_width)
    data_consistency_check(patches_imgs_test, patches_masks_test)

    print("\ntest PATCHES images/masks shape:")
    print(patches_imgs_test.shape)
    print("test PATCHES images range (min-max): " +
          str(np.min(patches_imgs_test)) + ' - ' +
          str(np.max(patches_imgs_test)))

    return patches_imgs_test, patches_masks_test
コード例 #14
0
ファイル: extract_patches.py プロジェクト: HaonanGu/retina_g
def get_data_training(DRIVE_train_imgs_original, DRIVE_train_groudTruth,
                      patch_height, patch_width, N_subimgs, inside_FOV,
                      num_lesion, total_data):
    """Load lesion training data, crop to 422x422, and extract random patches.

    Parameters:
        DRIVE_train_imgs_original: path to the HDF5 file of training images.
        DRIVE_train_groudTruth: path *prefix* of the ground-truth HDF5 file
            ('.hdf5' is appended below).
        patch_height, patch_width: patch dimensions.
        N_subimgs: number of random patches to extract.
        inside_FOV: whether patches must lie inside the field of view.
        num_lesion: unused in this function body.
        total_data: number of images; sizes the mask buffer first axis.

    Returns:
        (patches_imgs_train, patches_masks_train)
    """

    train_imgs_original = load_hdf5(
        DRIVE_train_imgs_original)  #[img_id:img_id+1]
    # Pre-allocate a single-channel mask buffer matching the image spatial dims.
    train_masks = np.zeros([
        total_data, 1, train_imgs_original.shape[2],
        train_imgs_original.shape[3]
    ])

    # Copy channel 0 of the loaded ground truth into the buffer.
    train_masks_temp = load_hdf5(
        DRIVE_train_groudTruth +
        '.hdf5')  #[img_id:img_id+1]#masks always the same
    train_masks[:, 0, :, :] = train_masks_temp[:, 0, :, :]
    print("mask:", train_masks_temp.shape)
    print(train_masks[:, 0, :, :].shape)

    print(train_imgs_original[:, 0, :, :].shape)
    train_imgs = my_PreProc(train_imgs_original)

    print(train_imgs[:, 0, :, :].shape)
    train_masks = train_masks / 255.

    train_imgs = train_imgs[:, :,
                            7:429, :]  #cut bottom and top so now it is 422*422
    train_masks = train_masks[:, :, 7:
                              429, :]  #cut bottom and top so now it is 422*422
    data_consistency_check(train_imgs, train_masks)

    #check masks are within 0-1 (binary after the /255 scaling)
    assert (np.min(train_masks) == 0 and np.max(train_masks) == 1)

    print("\ntrain images shape:")
    print(train_imgs.shape)
    print("\ntrain masks shape:")
    print(train_masks.shape)
    print("train images 0 range (min-max): " + str(np.min(train_imgs[:, 0])) +
          ' - ' + str(np.max(train_imgs[:, 0])))
    # print "train images 1 range (min-max): " +str(np.min(train_imgs[:,1])) +' - '+str(np.max(train_imgs[:,1]))
    #print "train images 2 range (min-max): " +str(np.min(train_imgs[:,2])) +' - '+str(np.max(train_imgs[:,2]))
    print("train masks are within 0-1\n")

    #extract the TRAINING patches from the full images
    patches_imgs_train, patches_masks_train = extract_random(
        train_imgs, train_masks, patch_height, patch_width, N_subimgs,
        inside_FOV)
    data_consistency_check(patches_imgs_train, patches_masks_train)

    print("\ntrain PATCHES images/masks shape:")
    print(patches_imgs_train.shape)
    print(patches_masks_train.shape)
    print("train PATCHES images range (min-max): " +
          str(np.min(patches_imgs_train)) + ' - ' +
          str(np.max(patches_imgs_train)))

    return patches_imgs_train, patches_masks_train  #, patches_imgs_test, patches_masks_test
def get_data_training(
        DRIVE_train_imgs_original,  # path to the training images
        DRIVE_train_groudTruth,  # path to the ground-truth images
        patch_height,
        patch_width,
        N_subimgs,
        inside_FOV):
    """Load training data, crop per dataset, and extract random patches.

    NOTE(review): `dataset` below is not a parameter — it must be a
    module-level global ('DRIVE'/'STARE'/'CHASE'/'HRF'); confirm it is
    defined before this function is called, otherwise a NameError is raised.
    """
    train_imgs_original = load_hdf5(DRIVE_train_imgs_original)
    train_masks = load_hdf5(
        DRIVE_train_groudTruth
    )  # masks always the same; "mask" in this code means the ground truth
    # visualize(group_images(train_imgs_original[0:20,:,:,:],5),'imgs_train')#.show()  # check original imgs train
    # my_PreProc(): normalization, adaptive histogram equalization,
    # lookup-table histogram stretching, then rescaling to 0-1.
    train_imgs = my_PreProc(train_imgs_original)
    train_masks = train_masks / 255.
    # Ground truth also scaled to 0-1: white=255, black=0, so after /255
    # the mask contains only the two values {0, 1}.

    # Dataset-specific crop so the field of view becomes square.
    if dataset == 'DRIVE':
        train_imgs = train_imgs[:, :, 9:
                                574, :]  # cut bottom and top so now it is 565*565
        train_masks = train_masks[:, :, 9:
                                  574, :]  # cut bottom and top so now it is 565*565
    elif dataset == 'STARE':
        train_imgs = train_imgs[:, :, :, 15:685]
        train_masks = train_masks[:, :, :, 15:685]
    elif dataset == 'CHASE':
        train_imgs = train_imgs[:, :, :, 19:979]
        train_masks = train_masks[:, :, :, 19:979]
    elif dataset == 'HRF':
        train_imgs = train_imgs[:, :, :, 19:979]
        train_masks = train_masks[:, :, :, 19:979]

    data_consistency_check(train_imgs, train_masks)

    # check masks are within 0-1 (binary after the /255 scaling)
    assert (np.min(train_masks) == 0 and np.max(train_masks) == 1)

    print("\ntrain images/masks shape:")
    print(train_imgs.shape)
    print("train images range (min-max): " + str(np.min(train_imgs)) + ' - ' +
          str(np.max(train_imgs)))
    print("train masks are within 0-1\n")

    # extract the TRAINING patches from the full images
    patches_imgs_train, patches_masks_train = extract_random(
        train_imgs, train_masks, patch_height, patch_width, N_subimgs,
        inside_FOV)
    data_consistency_check(patches_imgs_train, patches_masks_train)

    print("\ntrain PATCHES images/masks shape:")
    print(patches_imgs_train.shape)
    print("train PATCHES images range (min-max): " +
          str(np.min(patches_imgs_train)) + ' - ' +
          str(np.max(patches_imgs_train)))

    return patches_imgs_train, patches_masks_train  # patches_imgs_test, patches_masks_test
コード例 #16
0
def get_data_training(train_imgs_file, train_gt_file, patch_height,
                      patch_width, num_patches, channel, config):
    """
    :param train_imgs_file:  the filename of training images
    :param train_gt_file: the filename of training ground truth images
    :param patch_height: the height of patch
    :param patch_width: the width of patch
    :param num_patches: the number of patches need to be generated
    :return: (image patches, ground-truth patches, number of categories)
    """
    images = hf.load_hdf5(train_imgs_file)
    ground_truth = hf.load_hdf5(train_gt_file)

    print("shape of training gt sample", ground_truth.shape)
    data_consistency_check(images, ground_truth)

    # Number of categories = count of distinct label values in the GT.
    category_num = np.shape(np.unique(ground_truth))[0]
    print("----------------------------------------------")
    print("shape of training image sample:", images.shape)
    print('----------------------------------------------')
    print("train images range (min-max): {}- {} ".format(
        np.min(images), np.max(images)))

    # Extract training patches — randomly or on a fixed grid per config.
    if config['random'] == 1:
        img_patches, gt_patches = extract_random(images, ground_truth,
                                                 patch_height, patch_width,
                                                 num_patches, channel)
    else:
        img_patches, gt_patches = extract_ordered(images, ground_truth,
                                                  patch_height, patch_width,
                                                  channel)
    data_consistency_check(img_patches, gt_patches)

    print('---------------------patches info------------------------')
    print('category_num', category_num)
    print("train PATCHES images shape:", img_patches.shape)
    print("train PATCHES images range: {}- {}, data type: {}".format(
        np.min(img_patches), np.max(img_patches), img_patches.dtype))
    print('gt patch value range {} - {}, data type: {}'.format(
        np.min(gt_patches), np.max(gt_patches), gt_patches.dtype))
    print('---------------------------------------------------------')
    return img_patches, gt_patches, category_num
コード例 #17
0
def get_data_training(hdf5_train_imgs, hdf5_train_groundTruth, patch_height,
                      patch_width, N_subimgs):
    """Load training images/masks and sample N_subimgs random patch pairs.

    No preprocessing is applied; the images are used exactly as loaded.
    """
    imgs = load_hdf5(hdf5_train_imgs)
    masks = load_hdf5(hdf5_train_groundTruth)

    return extract_random(imgs, masks, patch_height, patch_width, N_subimgs)
コード例 #18
0
def get_data_training_rotate(train_imgs_original,
                             train_groudTruth,
                             patch_height,
                             patch_width,
                             N_subimgs,
                             inside_FOV,
                             dataset='DRIVE'):
    """Extract randomly rotated training patches and shuffle them."""
    imgs = my_PreProc(load_hdf5(train_imgs_original))
    masks = load_hdf5(train_groudTruth) / 255.  # masks always the same

    # Dataset-specific crop so the field of view becomes square.
    if dataset == 'DRIVE':
        imgs = imgs[:, :, 9:574, :]
        masks = masks[:, :, 9:574, :]
    elif dataset == 'CHASE':
        imgs = imgs[:, :, :, 19:979]
        masks = masks[:, :, :, 19:979]
    print("train_imgs shape:", imgs.shape)
    data_consistency_check(imgs, masks)

    # Masks must be binary after the /255 scaling.
    assert np.min(masks) == 0 and np.max(masks) == 1

    print("\ntrain images/masks shape:")
    print(imgs.shape)
    print("train images range (min-max): " + str(np.min(imgs)) + ' - ' + str(np.max(imgs)))
    print("train masks are within 0-1\n")

    # Sample rotated patches from the full images.
    patch_imgs, patch_masks = extract_random_rotate(imgs, masks, patch_height,
                                                    patch_width, N_subimgs,
                                                    inside_FOV)
    # Shuffle images and masks with the same permutation.
    order = list(range(N_subimgs))
    random.shuffle(order)
    patch_imgs = patch_imgs[order]
    patch_masks = patch_masks[order]
    print("Random Shuffled!")

    data_consistency_check(patch_imgs, patch_masks)

    print("\ntrain PATCHES images/masks shape:")
    print(patch_imgs.shape)
    print("train PATCHES images range (min-max): " + str(np.min(patch_imgs)) + ' - ' + str(np.max(patch_imgs)))

    return patch_imgs, patch_masks
コード例 #19
0
def get_data_testing(test_img_ori_path, num_test_img, patch_h, patch_w):
    """Load test images, zero-pad them, and cut ordered patches."""
    raw = load_hdf5(test_img_ori_path)
    print('test img shape : ', np.shape(raw))
    imgs = my_preprocessing(raw)

    # Keep only the requested number of images.
    imgs = imgs[0:num_test_img, :, :, :]
    print('type : ', type(imgs))
    print('[get data testing func] prev test img shape : {}'.format(
        imgs.shape))

    # Pad so the patch grid tiles the images exactly.
    imgs = zero_padding_test(imgs, patch_h)
    print('[get data testing func] after test img shape : {} '.format(
        imgs.shape))

    print("[get_data_testing_func] test images range (min-max): " +
          str(np.min(imgs)) + ' - ' + str(np.max(imgs)))
    print("[get_data_testing_fucn] test masks are within 0-1\n")

    patches = extract_ordered(imgs, patch_h, patch_w)

    print("\n[get_data_testing_fucn] test PATCHES images/grds shape:")
    print(patches.shape)
    print(
        "[get_data_testing_fucn] test PATCHES images range (min-max): {} - {}".
        format(str(np.min(patches)), str(np.max(patches))))

    return patches
コード例 #20
0
def main(dirname, num_hidden):
    """
    Main program for running the restricted Boltzmann machine
    :param dirname: string with name of the directory in which the input files are present
    :param num_hidden: integer corresponding with the number of hidden nodes
    """
    split_fraction = 0.8
    base_path = "./"

    data = hf.load_hdf5(base_path + dirname + ".hdf5")
    print("Finished loading data. Start training")

    # One visible node per data column; hidden layer size from the caller.
    machine = rbm.RBM(training_data=data,
                      num_visible=data.shape[1],
                      num_hidden=int(num_hidden))
    machine.train(outfile=dirname,
                  split=split_fraction,
                  max_iterations=1000,
                  lr=0.1,
                  k=1,
                  visualize=False)
    machine.final_hid_recon()
    machine.save_parameters(dirname)  # Save output
コード例 #21
0
def get_data_testing_overlap(DRIVE_test_imgs_original, Imgs_to_test,
                             patch_height, patch_width, stride_height,
                             stride_width):
    """Prepare test images for overlapping-patch prediction.

    Loads the pre-processed test images, keeps the first ``Imgs_to_test``
    of them, pads the borders so the stride tiling fits exactly, and
    extracts the overlapping patches.

    :return: (patches, padded_height, padded_width) — the padded full-image
        dimensions are needed later to reassemble the patches.
    """
    imgs = load_hdf5(DRIVE_test_imgs_original)

    # Keep only the requested images, then pad so that an integer number
    # of strides fits in each spatial dimension.
    imgs = imgs[0:Imgs_to_test, :, :, :]
    imgs = paint_border_overlap(imgs, patch_height, patch_width,
                                stride_height, stride_width)

    print("\ntest images shape:")
    print(imgs.shape)
    print("test images range (min-max): " + str(np.min(imgs)) + ' - ' +
          str(np.max(imgs)))

    patches = extract_ordered_overlap(imgs, patch_height, patch_width,
                                      stride_height, stride_width)

    print("\ntest PATCHES images shape:")
    print(patches.shape)
    print("test PATCHES images range (min-max): " + str(np.min(patches)) +
          ' - ' + str(np.max(patches)))

    return patches, imgs.shape[2], imgs.shape[3]
コード例 #22
0
def get_data_training_overlap(DRIVE_train_imgs_original,
                              DRIVE_train_groudTruth,
                              DRIVE_train_borders,
                              patch_height,
                              patch_width,
                              N_subimgs,
                              inside_FOV):
    """Build the ordered-overlap training patch set.

    Loads images, vessel masks and border masks from HDF5, scales the
    masks/borders to [0, 1], applies contrast/gamma pre-processing to the
    images, and extracts aligned (image, mask, border) patch triples.

    ``N_subimgs`` and ``inside_FOV`` are accepted for interface
    compatibility with the random-patch variant but are not used here.

    :return: (patches_imgs_train, patches_masks_train)
    """
    # Pre-processing applies contrast and gamma correction.
    imgs = my_PreProc(load_hdf5(DRIVE_train_imgs_original))
    masks = load_hdf5(DRIVE_train_groudTruth) / 255.
    borders = load_hdf5(DRIVE_train_borders) / 255.

    data_consistency_check(imgs, masks, borders)

    # Masks and borders must be strictly binary in [0, 1].
    print(np.min(masks), np.max(masks))
    assert (np.min(masks) == 0 and np.max(masks) == 1)
    assert (np.min(borders) == 0 and np.max(borders) == 1)

    print("\ntrain images/masks shape:")
    print(imgs.shape)
    print("train images range (min-max): " + str(np.min(imgs)) + ' - ' + str(np.max(imgs)))
    print("train masks are within 0-1\n")

    # Extract the TRAINING patches from the full images.
    patches_imgs, patches_masks, patches_borders = extract_ordered_overlap_train(
        imgs, masks, borders, patch_height, patch_width)
    data_consistency_check(patches_imgs, patches_masks, patches_borders)

    print("\ntrain PATCHES images/masks shape:")
    print(patches_imgs.shape)
    print("train PATCHES images range (min-max): " + str(np.min(patches_imgs)) + ' - ' + str(np.max(patches_imgs)))

    return patches_imgs, patches_masks
コード例 #23
0
ファイル: extract_patches.py プロジェクト: HaonanGu/retina_g
def get_data_testing(DRIVE_test_imgs_original, DRIVE_test_groudTruth,
                     Imgs_to_test, patch_height, patch_width, num_lesion,
                     total_data):
    """Prepare test images and masks as ordered (non-overlapping) patches.

    ``num_lesion`` is accepted for interface compatibility but not used
    here. Note the ground-truth path gets a '.hdf5' suffix appended.

    :return: (patches_imgs_test, patches_masks_test)
    """
    imgs_orig = load_hdf5(DRIVE_test_imgs_original)

    # Re-pack the single-channel ground truth into a (total_data, 1, H, W)
    # array matching the image spatial dimensions.
    masks = np.zeros(
        [total_data, 1, imgs_orig.shape[2], imgs_orig.shape[3]])
    masks[:, 0, :, :] = load_hdf5(DRIVE_test_groudTruth + '.hdf5')[:, 0, :, :]

    imgs = my_PreProc(imgs_orig)
    masks = masks / 255.

    # Keep only the requested images, then pad so the patch grid divides
    # each image exactly.
    imgs = imgs[0:Imgs_to_test, :, :, :]
    masks = masks[0:Imgs_to_test, :, :, :]
    imgs = paint_border(imgs, patch_height, patch_width)
    masks = paint_border(masks, patch_height, patch_width)

    data_consistency_check(imgs, masks)

    # Masks must be binary in [0, 1].
    assert (np.max(masks) == 1 and np.min(masks) == 0)

    print("\ntest images/masks shape:")
    print(imgs.shape)
    print("test images range (min-max): " + str(np.min(imgs)) + ' - ' +
          str(np.max(imgs)))
    print("test masks are within 0-1\n")

    # Extract the TEST patches from the full images.
    patches_imgs = extract_ordered(imgs, patch_height, patch_width)
    patches_masks = extract_ordered(masks, patch_height, patch_width)
    data_consistency_check(patches_imgs, patches_masks)

    print("\ntest PATCHES images/masks shape:")
    print(patches_imgs.shape)
    print("test PATCHES images range (min-max): " +
          str(np.min(patches_imgs)) + ' - ' + str(np.max(patches_imgs)))

    return patches_imgs, patches_masks
コード例 #24
0
def get_data_testing(test_imgs_original, test_groudTruth, Imgs_to_test,
                     patch_height, patch_width):
    """Load, normalize and patch the test images and ground truths.

    :param test_imgs_original: path of the pre-processed test-image HDF5 file
    :param test_groudTruth: path of the ground-truth HDF5 file
    :param Imgs_to_test: number of leading images to keep
    :param patch_height: patch height in pixels
    :param patch_width: patch width in pixels
    :return: (patches_imgs_test, patches_groundTruths_test)
    """
    # Load test images from hdf5 files from pre-processing
    test_imgs_original = load_hdf5(test_imgs_original)
    test_groundTruths = load_hdf5(test_groudTruth)

    # Normalize: contrast/gamma pre-processing for images, [0, 1] for truths
    test_imgs = my_PreProc(test_imgs_original)
    test_groundTruths = test_groundTruths / 255.

    # Extend both images and masks so they divide exactly by the patch size
    test_imgs = test_imgs[0:Imgs_to_test, :, :, :]
    test_groundTruths = test_groundTruths[0:Imgs_to_test, :, :, :]
    test_imgs = paint_border(test_imgs, patch_height, patch_width)
    test_groundTruths = paint_border(test_groundTruths, patch_height,
                                     patch_width)

    data_consistency_check(test_imgs, test_groundTruths)

    # Check masks are within 0-1
    assert (np.max(test_groundTruths) == 1 and np.min(test_groundTruths) == 0)

    # BUGFIX: these messages previously said "train" although this is test data.
    print("test images shape:" + str(test_imgs.shape))
    print("test images range (min-max): " + str(np.min(test_imgs)) + ' - ' +
          str(np.max(test_imgs)))
    print("test ground truths shape:" + str(test_groundTruths.shape))
    print("test ground truths range (min-max): " +
          str(np.min(test_groundTruths)) + ' - ' +
          str(np.max(test_groundTruths)))

    # Extract the TEST patches from the full images
    patches_imgs_test = extract_ordered(test_imgs, patch_height, patch_width)
    patches_groundTruths_test = extract_ordered(test_groundTruths,
                                                patch_height, patch_width)
    data_consistency_check(patches_imgs_test, patches_groundTruths_test)

    # BUGFIX: .shape is a tuple — concatenating it to str raised TypeError;
    # wrap in str() before concatenation.
    print("test PATCHES images shape: " + str(patches_imgs_test.shape))
    print("test PATCHES images range (min-max): " +
          str(np.min(patches_imgs_test)) + ' - ' +
          str(np.max(patches_imgs_test)))
    print("test PATCHES ground truths shape: " +
          str(patches_groundTruths_test.shape))
    print("test PATCHES ground truths range (min-max): " +
          str(np.min(patches_groundTruths_test)) + ' - ' +
          str(np.max(patches_groundTruths_test)))

    return patches_imgs_test, patches_groundTruths_test
コード例 #25
0
def get_data_testing_overlap(DRIVE_test_imgs_original, DRIVE_test_groudTruth, Imgs_to_test, patch_height, patch_width, stride_height, stride_width):
    """Prepare RGB test images as overlapping patches.

    Re-uses the normalization parameters saved during training
    (``path_name + "/param.npy"``) when present, so test images are
    pre-processed consistently with the training set.

    :return: (patches, padded_height, padded_width, masks)
    """
    imgs_orig = load_hdf5(DRIVE_test_imgs_original)
    masks = load_hdf5(DRIVE_test_groudTruth)

    # Load saved pre-processing parameters if the training run stored them.
    param = None
    if os.path.exists(path_name + "/param.npy"):
        param = np.load(path_name + "/param.npy", allow_pickle=True).item()
    imgs = my_PreProc_RGB(imgs_orig, param)
    masks = masks / 255.

    # Keep only the requested images, then pad so that an integer number
    # of strides fits in each spatial dimension.
    imgs = imgs[0:Imgs_to_test, :, :, :]
    masks = masks[0:Imgs_to_test, :, :, :]
    imgs = paint_border_overlap(imgs, patch_height, patch_width, stride_height, stride_width)

    # Masks must be binary in [0, 1].
    assert (np.max(masks) == 1 and np.min(masks) == 0)

    print("\ntest images shape:")
    print(imgs.shape)
    print("\ntest mask shape:")
    print(masks.shape)
    print("test images range (min-max): " + str(np.min(imgs)) + ' - ' + str(np.max(imgs)))
    print("test masks are within 0-1\n")

    # Extract the TEST patches from the full images.
    patches = extract_ordered_overlap(imgs, patch_height, patch_width, stride_height, stride_width)

    print("\ntest PATCHES images shape:")
    print(patches.shape)
    print("test PATCHES images range (min-max): " + str(np.min(patches)) + ' - ' + str(np.max(patches)))

    return patches, imgs.shape[2], imgs.shape[3], masks
コード例 #26
0
def get_data_testing_overlap(test_images_file, test_gt_file, n_test_images,
                             patch_height, patch_width, stride_height,
                             stride_width, channel, config):
    """Load test images/ground truth and cut overlapping patches.

    :param test_images_file: filename of the hdf5 test images
    :param test_gt_file: filename of the hdf5 ground truth
    :param n_test_images: number of leading test images to keep
    :param patch_height: patch height
    :param patch_width: patch width
    :param stride_height: vertical stride between patches
    :param stride_width: horizontal stride between patches
    :param channel: channel count forwarded to the patch helpers
    :param config: unused here; kept for interface compatibility
    :return: (patches, padded_height, padded_width, gt, n_classes)
    """
    images = hf.load_hdf5(test_images_file)[0:n_test_images, :, :, :]
    gt = hf.load_hdf5(test_gt_file)[0:n_test_images, :, :, :]

    # Pad borders so the stride tiling divides each image exactly.
    images = paint_border_overlap(images, patch_height, patch_width,
                                  stride_height, stride_width, channel)

    print("extended test images shape:", images.shape)
    print("print sample data:", images[0, 0, 0:100, 0:100])
    print("test ground truth shape:", gt.shape)
    print("sample gt:", gt[0, 0, 0:100, 0:100])
    print("test images range (min-max): {}-{} ".format(np.min(images),
                                                       np.max(images)))

    # Extract the TEST patches from the full images.
    patches = extract_ordered_overlap(images, patch_height, patch_width,
                                      stride_height, stride_width, channel)

    print("test PATCHES images shape:", patches.shape)
    print("test PATCHES images range (min-max):{} - {}".format(
        np.min(patches), np.max(patches)))

    # Class count inferred from the label values (assumes labels are 0..max).
    return patches, images.shape[2], images.shape[3], gt, np.max(gt) + 1
def get_data_testing(DRIVE_test_imgs_original, DRIVE_test_groudTruth,
                     Imgs_to_test, patch_height, patch_width):
    """Load the test set and split it into ordered, non-overlapping patches.

    :return: (patches_imgs_test, patches_masks_test)
    """
    imgs = my_PreProc(load_hdf5(DRIVE_test_imgs_original))
    masks = load_hdf5(DRIVE_test_groudTruth) / 255.

    # Zero-pad the borders (paint_border) so an integer number of
    # patch_height x patch_width patches fits each image.
    imgs = imgs[0:Imgs_to_test, :, :, :]
    masks = masks[0:Imgs_to_test, :, :, :]
    imgs = paint_border(imgs, patch_height, patch_width)
    masks = paint_border(masks, patch_height, patch_width)

    data_consistency_check(imgs, masks)

    # Ground truth must be binary in [0, 1].
    assert (np.max(masks) == 1 and np.min(masks) == 0)

    print("\ntest images/masks shape:")
    print(imgs.shape)
    print("test images range (min-max): " + str(np.min(imgs)) + ' - ' +
          str(np.max(imgs)))
    print("test masks are within 0-1\n")

    # Extract the TEST patches from the full images.
    patches_imgs = extract_ordered(imgs, patch_height, patch_width)
    patches_masks = extract_ordered(masks, patch_height, patch_width)
    data_consistency_check(patches_imgs, patches_masks)

    print("\ntest PATCHES images/masks shape:")
    print(patches_imgs.shape)
    print("test PATCHES images range (min-max): " +
          str(np.min(patches_imgs)) + ' - ' + str(np.max(patches_imgs)))

    return patches_imgs, patches_masks
コード例 #28
0
def get_data_testing(DRIVE_test_imgs_original, DRIVE_test_groudTruth, Imgs_to_test, patch_height, patch_width):
    """Prepare RGB test images/masks as ordered (non-overlapping) patches.

    Re-uses the normalization parameters saved during training
    (``path_name + "/param.npy"``) when present, so test images are
    pre-processed consistently with the training set.

    :return: (patches_imgs_test, patches_masks_test)
    """
    imgs_orig = load_hdf5(DRIVE_test_imgs_original)
    masks = load_hdf5(DRIVE_test_groudTruth)

    # Load saved pre-processing parameters if the training run stored them.
    param = None
    if os.path.exists(path_name + "/param.npy"):
        param = np.load(path_name + "/param.npy", allow_pickle=True).item()
    imgs = my_PreProc_RGB(imgs_orig, param)
    masks = masks / 255.

    # Keep only the requested images, then pad so the patch grid divides
    # each image exactly.
    imgs = imgs[0:Imgs_to_test, :, :, :]
    masks = masks[0:Imgs_to_test, :, :, :]
    imgs = paint_border(imgs, patch_height, patch_width)
    masks = paint_border(masks, patch_height, patch_width)

    data_consistency_check(imgs, masks)

    # Masks must be binary in [0, 1].
    assert (np.max(masks) == 1 and np.min(masks) == 0)

    print("\ntest images/masks shape:")
    print(imgs.shape)
    print("test images range (min-max): " + str(np.min(imgs)) + ' - ' + str(np.max(imgs)))
    print("test masks are within 0-1\n")

    # Extract the TEST patches from the full images.
    patches_imgs = extract_ordered(imgs, patch_height, patch_width)
    patches_masks = extract_ordered(masks, patch_height, patch_width)
    data_consistency_check(patches_imgs, patches_masks)

    print("\ntest PATCHES images/masks shape:")
    print(patches_imgs.shape)
    print("test PATCHES images range (min-max): " + str(np.min(patches_imgs)) + ' - ' + str(np.max(patches_imgs)))

    return patches_imgs, patches_masks
コード例 #29
0
def get_data_testing_color(DRIVE_test_imgs_original, DRIVE_test_groudTruth, Imgs_to_test, patch_height, patch_width):
    """Prepare color test images as ordered patches plus one-hot labels.

    :param DRIVE_test_imgs_original: path of the test-image HDF5 file
    :param DRIVE_test_groudTruth: path of the ground-truth-label HDF5 file
    :param Imgs_to_test: number of leading images to keep
    :param patch_height: patch height in pixels
    :param patch_width: patch width in pixels
    :return: (patches_imgs_test, test_labels) where test_labels are the
        one-hot (categorical) ground-truth labels
    """
    test_imgs_original = load_hdf5(DRIVE_test_imgs_original)
    test_labels = load_hdf5(DRIVE_test_groudTruth)

    # BUGFIX: was `train_imgs_original / 255.`, an undefined name here
    # (NameError at runtime); the loaded TEST images are what must be
    # scaled to [0, 1].
    test_imgs = test_imgs_original / 255.
    # nb_classes is a module-level constant -- presumably the label-class
    # count; verify it matches the label range (NOTE(review)).
    test_labels = np_utils.to_categorical(test_labels, nb_classes)

    # Extend the images so they divide exactly by the patch dimensions.
    test_imgs = test_imgs[0:Imgs_to_test, :, :, :]
    test_imgs = paint_border(test_imgs, patch_height, patch_width)

    print("\ntest images/masks shape:")
    print(test_imgs.shape)
    print("test images range (min-max): " + str(np.min(test_imgs)) + ' - ' + str(np.max(test_imgs)))

    # Extract the TEST patches from the full images.
    patches_imgs_test = extract_ordered(test_imgs, patch_height, patch_width)

    print("\ntest PATCHES images/masks shape:")
    print(patches_imgs_test.shape)
    print("test PATCHES images range (min-max): " + str(np.min(patches_imgs_test)) + ' - ' + str(np.max(patches_imgs_test)))

    return patches_imgs_test, test_labels
コード例 #30
0
def get_data_testing(test_img_ori_path, num_test_img, patch_h, patch_w):
    """Load and pre-process test images; whole images are returned as the
    "patches" (no patch extraction is performed in this variant).

    ``num_test_img``, ``patch_h`` and ``patch_w`` are currently unused but
    kept for interface compatibility with the overlap variant.

    :return: pre-processed test images scaled to [0, 1]
    """
    raw = load_hdf5(test_img_ori_path)
    print('test img shape : ', np.shape(raw))

    # Pre-process, then scale to [0, 1]; the full images pass straight
    # through as the "patches".
    patches_imgs_test = my_preprocessing(raw) / 255.0

    print("\n[get_data_testing_fucn] test PATCHES images/grds shape:")
    print(patches_imgs_test.shape)
    print(
        "[get_data_testing_fucn] test PATCHES images range (min-max): {} - {}".
        format(str(np.min(patches_imgs_test)), str(np.max(patches_imgs_test))))

    return patches_imgs_test
コード例 #31
0
def get_data_testing_overlap(DRIVE_test_img_ori_path, num_test_img, patch_h,
                             patch_w, stride_h, stride_w):
    """Pre-process test images and extract overlapping patches.

    :return: (patches, padded_height, padded_width) — the padded full-image
        dimensions are needed to reassemble patch predictions later.
    """
    raw = load_hdf5(DRIVE_test_img_ori_path)
    print(np.shape(raw))
    imgs = my_preprocessing(raw)

    # Keep only the requested number of images.
    imgs = imgs[0:num_test_img, :, :, :]
    print('type : ', type(imgs))
    print('[get data testing overlap] prev test img shape : {} '.format(
        imgs.shape))

    # From here this differs from the non-overlap variant: pad so the
    # stride tiling fits exactly, then cut overlapping patches.
    imgs = paint_border_overlap(imgs, patch_h, patch_w, stride_h, stride_w)
    print('[get data testing overlap] after test img shape : {} '.format(
        imgs.shape))

    print("[get_data_testing_overlap func] test images range (min-max): " +
          str(np.min(imgs)) + ' - ' + str(np.max(imgs)))
    print("[get_data_testing_overlap fucn] test masks are within 0-1\n")

    patches = extract_ordered_overlap(imgs, patch_h, patch_w, stride_h,
                                      stride_w)

    print("\n[get_data_testing_overlap func] test PATCHES images shape:")
    print(patches.shape)
    print(
        "[get_data_testing_overlap func] test PATCHES images range (min-max): "
        + str(np.min(patches)) + ' - ' + str(np.max(patches)))

    return patches, imgs.shape[2], imgs.shape[3]
コード例 #32
0
def get_data_training(DRIVE_train_imgs_original,
                      DRIVE_train_groudTruth,
                      patch_height,
                      patch_width,
                      N_subimgs,
                      inside_FOV):
    """Build random training/validation patches from the training set.

    The full image set is split 90/10 at random into train/validation
    images, each side is pre-processed (validation re-uses the saved
    normalization parameters), the black left/right retina margins are
    cropped, and random patches are sampled from each side.

    :param DRIVE_train_imgs_original: path of the training-image HDF5 file
    :param DRIVE_train_groudTruth: path of the ground-truth HDF5 file
    :param patch_height: patch height in pixels
    :param patch_width: patch width in pixels
    :param N_subimgs: total patch budget across train + validation
    :param inside_FOV: restrict patch centers to the field of view
    :return: (patches_imgs_train, patches_masks_train,
              patches_imgs_val, patches_masks_val)
    """
    train_imgs = load_hdf5(DRIVE_train_imgs_original)
    train_masks = load_hdf5(DRIVE_train_groudTruth) / 255.  # masks -> [0, 1]

    data_consistency_check(train_imgs, train_masks)

    # Masks must be binary in [0, 1].
    assert (np.min(train_masks) == 0 and np.max(train_masks) == 1)

    # Random 90/10 split of whole images into train/validation.
    total = train_imgs.shape[0]
    idx_train = random.sample(range(total), int(total * 0.9))
    idx_val = list(set(range(total)) - set(idx_train))

    imgs_train, imgs_val = train_imgs[idx_train], train_imgs[idx_val]
    masks_train, masks_val = train_masks[idx_train], train_masks[idx_val]

    # Pre-process; validation re-uses the parameters saved by the training
    # pre-processing run so both sides are normalized consistently.
    imgs_train = my_PreProc_RGB(imgs_train)
    param = None
    if os.path.exists(path_name + "/param.npy"):
        param = np.load(path_name + "/param.npy", allow_pickle=True).item()
    imgs_val = my_PreProc_RGB(imgs_val, param)

    # Crop black regions on left and right side of retina.
    imgs_train, imgs_val = imgs_train[:, :, :, 100:-100], imgs_val[:, :, :, 100:-100]
    masks_train, masks_val = masks_train[:, :, :, 100:-100], masks_val[:, :, :, 100:-100]

    print("\ntrain images/masks shape:")
    print(imgs_train.shape)
    print("train images range (min-max): " + str(np.min(imgs_train)) + ' - ' + str(np.max(imgs_train)))
    print("train masks are within 0-1\n")

    # Patch budget proportional to each split's image count.
    # BUGFIX: was `N_subimgs / total * shape[0]`, which truncated the
    # quotient (integer division) BEFORE multiplying and so under-counted
    # whenever N_subimgs is not an exact multiple of total; multiplying
    # first preserves the proportion (floor division keeps an int count).
    N_train = N_subimgs * imgs_train.shape[0] // total
    N_val = N_subimgs * imgs_val.shape[0] // total

    # Extract the random TRAINING/VALIDATION patches from the full images.
    patches_imgs_train, patches_masks_train = extract_random(
        imgs_train, masks_train, patch_height, patch_width, N_train, inside_FOV)
    patches_imgs_val, patches_masks_val = extract_random(
        imgs_val, masks_val, patch_height, patch_width, N_val, inside_FOV)

    return patches_imgs_train, patches_masks_train, patches_imgs_val, patches_masks_val