# Example 1
def get_data_testing_overlap(test_imgs_original, test_groudTruth, Imgs_to_test, patch_height, patch_width,
                             stride_height, stride_width):
    """Load test images/masks from HDF5 and cut overlapping patches.

    Returns the patch tensor (NHWC), the padded image height and width,
    and the ground-truth masks (NHWC).
    """
    imgs = my_PreProc(load_hdf5(test_imgs_original))
    masks = load_hdf5(test_groudTruth) / 255.  # rescale masks to [0, 1]

    # keep only the first Imgs_to_test images, then pad so the stride tiles exactly
    imgs = imgs[:Imgs_to_test]
    masks = masks[:Imgs_to_test]
    imgs = paint_border_overlap(imgs, patch_height, patch_width, stride_height, stride_width)

    # ground truth must be strictly binary after rescaling
    assert (np.max(masks) == 1 and np.min(masks) == 0)

    print("\ntest images shape:")
    print(imgs.shape)
    print("\ntest mask shape:")
    print(masks.shape)
    print("test images range (min-max): " + str(np.min(imgs)) + ' - ' + str(np.max(imgs)))
    print("test masks are within 0-1\n")

    # cut the (overlapping) test patches out of the padded images
    patches = extract_ordered_overlap(imgs, patch_height, patch_width, stride_height, stride_width)

    print("\ntest PATCHES images shape:")
    print(patches.shape)
    print("test PATCHES images range (min-max): " + str(np.min(patches)) + ' - ' + str(
        np.max(patches)))

    # NCHW -> NHWC for both patches and masks
    patches = np.transpose(patches, (0, 2, 3, 1))
    masks = np.transpose(masks, (0, 2, 3, 1))
    return patches, imgs.shape[2], imgs.shape[3], masks
def get_data_training(train_imgs_original,
                      train_groudTruth,
                      patch_height,
                      patch_width,
                      N_subimgs,
                      inside_FOV,
                      fcn=True):
    """Load training images/masks from pickle files and sample random patches.

    When ``fcn`` is true, patches come from ``extract_random_patches`` and a
    consistency check is run on them; otherwise ``extract_random`` is used.
    Returns the patch images and patch masks.
    """
    imgs = my_PreProc(read_pickle(train_imgs_original))
    masks = read_pickle(train_groudTruth) / 255.  # masks always the same; rescale to [0, 1]

    data_consistency_check(imgs, masks)

    # ground truth must be strictly binary after rescaling
    assert (np.min(masks) == 0 and np.max(masks) == 1)

    print("\ntrain images/masks shape:")
    print(imgs.shape)
    print("train images range (min-max): " + str(np.min(imgs)) + ' - ' + str(np.max(imgs)))
    print("train masks are within 0-1\n")

    # sample N_subimgs random training patches from the full images
    if fcn:
        patches_x, patches_y = extract_random_patches(imgs, masks,
                                                      patch_height, patch_width,
                                                      N_subimgs,
                                                      inside=inside_FOV)
        data_consistency_check(patches_x, patches_y)
    else:
        patches_x, patches_y = extract_random(imgs, masks,
                                              patch_height, patch_width,
                                              N_subimgs,
                                              inside=inside_FOV)

    print("\ntrain PATCHES images/masks shape:")
    print(patches_x.shape)
    print("train PATCHES images range (min-max): " + str(np.min(patches_x)) + ' - ' + str(
        np.max(patches_x)))

    return patches_x, patches_y
# Example 3
    def gen(self, au=True, crop_size=64, iteration=None):
        """Infinite generator yielding (images, outputs-dict) training batches.

        Walks ``self.list_images_all`` / ``self.list_gt_all`` in windows of
        ``self.step``, preprocesses the images, optionally augments and/or
        random-crops them, and yields the batch together with a dict of
        ground-truth outputs.

        Parameters:
            au: apply random augmentation to each image/gt pair.
            crop_size: target patch size; when it differs from
                prepare_dataset.DESIRED_DATA_SHAPE[0], random crops are taken.
            iteration: number of auxiliary 'out1<i>' entries to emit in the
                outputs dict, in addition to 'final_out'.

        Fix: the original crashed with TypeError when ``iteration`` was left
        at its default (``range(None)``); ``None`` now means no auxiliary
        outputs.
        """
        while True:
            # [start, stop) slice for this batch; stop is clamped to n at the
            # tail of the dataset instead of wrapping
            data_yield = [
                self.index % self.n, (self.index + self.step) % self.n if
                (self.index + self.step) < self.n else self.n
            ]
            self.index = (self.index + self.step) % self.n

            list_images_base = self.list_images_all[
                data_yield[0]:data_yield[1]]
            list_gt_base = self.list_gt_all[data_yield[0]:data_yield[1]]

            list_images_base = pre_processing.my_PreProc(
                list_images_base)  # image-enhancement preprocessing

            list_images_aug = []
            list_gt_aug = []
            for image, gt in zip(list_images_base, list_gt_base):
                if au:
                    if crop_size == prepare_dataset.DESIRED_DATA_SHAPE[0]:
                        # full-size training: emit `repeat` augmented copies
                        # (note: augmentation compounds across repeats)
                        for _ in range(self.repeat):
                            image, gt = data_augmentation.random_augmentation(
                                image, gt)
                            list_images_aug.append(image)
                            list_gt_aug.append(gt)
                    else:
                        # cropping happens later, so a single augmentation here
                        image, gt = data_augmentation.random_augmentation(
                            image, gt)
                        list_images_aug.append(image)
                        list_gt_aug.append(gt)
                else:
                    list_images_aug.append(image)
                    list_gt_aug.append(gt)

            list_images = []
            list_gt = []

            if crop_size == prepare_dataset.DESIRED_DATA_SHAPE[0]:
                # no cropping needed: images already at the desired size
                list_images = list_images_aug
                list_gt = list_gt_aug
            else:
                # take `repeat` random crops from every augmented pair
                for image, gt in zip(list_images_aug, list_gt_aug):
                    for _ in range(self.repeat):
                        image_, gt_ = random_crop(image, gt, crop_size)

                        list_images.append(image_)
                        list_gt.append(gt_)

            # build the outputs dict: `iteration` auxiliary heads plus the
            # final one, all sharing the same ground truth
            n_aux_outs = 0 if iteration is None else iteration
            outs = {}
            for iteration_id in range(n_aux_outs):
                outs.update({f'out1{iteration_id + 1}': np.array(list_gt)})
            outs.update({'final_out': np.array(list_gt)})

            yield np.array(list_images), outs
def get_test_patches(img, crop_size, stride_size):
    """Preprocess a single test image and split it into patches.

    Returns the patch stack, the padded image height and width, and the
    preprocessed (pre-padding) image.
    """
    # add a leading batch axis — e.g. (H, W, C) -> (1, H, W, C)
    batch = np.asarray([img])

    # preprocessing (presumably collapses to one channel — confirm in my_PreProc)
    adjusted = my_PreProc(batch)

    # pad so crop_size/stride_size tile the image exactly, then cut patches
    padded = paint_border(adjusted, crop_size, stride_size)
    patches = extract_patches(padded, crop_size, stride_size)

    return patches, padded.shape[1], padded.shape[2], adjusted
            predictions.append(pred_prob)
end_time = time.time()
print("predict time:" + str(end_time - start_time))
# ===== Convert the prediction arrays in corresponding images
print("predicted images size :")
# stack the per-batch prediction arrays into one patch tensor
pred_patches = np.concatenate(predictions, 0)
print(pred_patches.shape)

# ========== Elaborate and visualize the predicted images ====================
pred_imgs = None
orig_imgs = None
gtruth_masks = None
if average_mode == True:
    # overlapping patches: average overlapping predictions back into
    # full-size images (NCHW)
    pred_imgs = recompone_overlap(pred_patches, new_height, new_width,
                                  stride_height, stride_width)  # predictions
    orig_imgs = my_PreProc(
        test_imgs_orig[0:pred_imgs.shape[0], :, :, :])  # originals
    gtruth_masks = np.transpose(masks_test, (0, 3, 1, 2))  # ground truth masks

else:
    # non-overlapping patches: tile them back on a hard-coded 13x12 grid
    # NOTE(review): 13x12 looks dataset-specific — confirm against the config
    pred_imgs = recompone(pred_patches, 13, 12)  # predictions
    orig_imgs = recompone(patches_imgs_test, 13, 12)  # originals
    gtruth_masks = recompone(np.transpose(patches_masks_test, (0, 3, 1, 2)),
                             13, 12)  # masks
# apply the DRIVE masks on the predictions #set everything outside the FOV to zero!!
# kill_border(pred_imgs, test_border_masks)  # MASK  #only for visualization
## back to original dimensions

# crop away the padding added before patch extraction
orig_imgs = orig_imgs[:, :, 0:full_img_height, 0:full_img_width]
pred_imgs = pred_imgs[:, :, 0:full_img_height, 0:full_img_width]
gtruth_masks = gtruth_masks[:, :, 0:full_img_height, 0:full_img_width]