Exemple #1
0
def gpu_transforms(dcm_list) -> DataLoader:
    """Build a DataLoader whose dataset does only a CPU-side resize.

    Heavier augmentation is presumably applied on the GPU elsewhere
    (see do_inplace_transforms in the driver loop) — TODO confirm.
    """
    # Resize image and mask together; the extra targets get the same ops.
    resize_only = A.Compose(
        [A.Resize(256, 256)],
        additional_targets={"image1": 'image', "mask1": 'mask'})

    dataset = CTDicomSlices(
        dcm_list,
        img_and_mask_transform=resize_only,
        n_surrounding=0,
        trim_edges=False,
        self_supervised_mask=True)#, preprocessing = prep)

    return DataLoader(dataset, batch_size=1, num_workers=0, shuffle=True)
Exemple #2
0
def cpu_transforms(dcm_list) -> DataLoader:
    """Build a DataLoader that runs the full preprocessing/augmentation on CPU."""
    # Dataset-wide intensity statistics (presumably precomputed — TODO confirm).
    mean, std = [61.0249], [78.3195]

    # Intensity pipeline: window, convert to image range, then normalize.
    prep = transforms.Compose(
        [Window(50, 200), Imagify(50, 200), Normalize(mean, std)])

    # Deterministic resize applied identically to image and mask targets.
    resize_tsfm = A.Compose(
        [A.Resize(256, 256)],
        additional_targets={"image1": 'image', "mask1": 'mask'})

    # Random affine jitter + horizontal flip, shared across image/mask targets.
    img_mask_tsfm = A.Compose(
        [
            A.ShiftScaleRotate(shift_limit=(0.1, 0.1),
                               scale_limit=(0.9, 1.1),
                               rotate_limit=15),
            A.HorizontalFlip(),
        ],
        additional_targets={"image1": 'image', "mask1": 'mask'})

    ctds = CTDicomSlices(
        dcm_list,
        preprocessing=prep,
        resize_transform=resize_tsfm,
        img_and_mask_transform=img_mask_tsfm,
        n_surrounding=0)

    #ctds = CTDicomSlicesFelzenszwalb(dcm_list, resize_transform=resize_tsfm, preprocessing=prep, transform = img_tsfm, n_surrounding=0, trim_edges=False)

    return DataLoader(ctds, batch_size=1, num_workers=0, shuffle=True)
Exemple #3
0
def get_dataset(dataset, model_dir):
    '''
    Builds the necessary datasets.

    dataset: root directory of the DICOM dataset.
    model_dir: directory where the train/val patient lists are saved
        (skipped when None).

    Returns a {'train': ..., 'val': ...} dict of CTDicomSlicesJigsaw datasets
    for the jigsaw modes, or a single CTDicomSlices dataset for 'felz'.
    Raises Exception for any other pre_train mode.

    Relies on module-level configuration: pre_train, WL, WW, num_shuffles,
    n_perms, in_channels, Constants.
    '''
    # create ds
    #dcm_list = CTDicomSlicesJigsaw.generate_file_list(dataset,
    #    dicom_glob='/*/*/dicoms/*.dcm')

    prep = transforms.Compose([Window(WL, WW), Imagify(WL, WW)]) #, Normalize(mean, std)])

    if pre_train == 'jigsaw' or pre_train == 'jigsaw_ennead' or pre_train == 'jigsaw_softrank':
        # BUG FIX: the original assigned to `n_perms` inside this function, which
        # made `n_perms` local everywhere in the body — so the plain 'jigsaw' and
        # 'jigsaw_ennead' paths raised UnboundLocalError when they read it below.
        # Bind a separate local that falls back to the module-level n_perms.
        # softrank learns without a fixed permutation set, hence None.
        perms = None if pre_train == 'jigsaw_softrank' else n_perms

        dsm = DatasetManager.generate_train_val_test(dataset, val_frac=0.05, test_frac=0, pretrain_ds=True)
        if model_dir is not None:
            dsm.save_lists(model_dir)

        train_dicoms, val_dicoms, _ = dsm.get_dicoms()

        datasets = {}
        datasets['train'] = CTDicomSlicesJigsaw(train_dicoms, preprocessing=prep, return_tile_coords=True,
            perm_path=Constants.default_perms, n_shuffles_per_image=num_shuffles, num_perms=perms)

        datasets['val'] = CTDicomSlicesJigsaw(val_dicoms, preprocessing=prep, return_tile_coords=True,
            perm_path=Constants.default_perms, n_shuffles_per_image=num_shuffles, num_perms=perms)

        return datasets

    elif pre_train == 'felz':
        dcm_list = CTDicomSlicesJigsaw.generate_file_list(dataset,
                dicom_glob='/*/*/dicoms/*.dcm')

        # Felz masks were saved with foreground being 1,2,3,4 (instead of 255). mask_is_255 flag is CRITICAL
        ctds = CTDicomSlices(dcm_list, preprocessing=prep, n_surrounding=in_channels // 2, mask_is_255=False)

        return ctds

    else:
        raise Exception('Invalid pre_train mode of "{}"'.format(pre_train))
Exemple #4
0
    # Left panel: the raw slice in grayscale.
    ax = fig.add_subplot(1, 3, 1)
    ax.imshow(slices, cmap='gray')

    # Middle panel: slice with the mask overlaid semi-transparently.
    ax2 = fig.add_subplot(1, 3, 2)
    ax2.imshow(slices, cmap='gray')
    ax2.imshow(mask, cmap='jet', alpha=0.5)

    # Right panel: the mask alone.
    ax3 = fig.add_subplot(1, 3, 3)
    ax3.imshow(mask, cmap='gray')

    fig.show()


if __name__ == '__main__':
    # Mini head/neck dataset used for a quick visual check.
    dataset = '/mnt/g/thesis/ct_only_cleaned_resized_mini/head-neck-radiomics'
    #dataset = Constants.organized_dataset_2
    dcm_list = CTDicomSlices.generate_file_list(dataset)
    #dicom_glob='/*/*/*.dcm')

    # Windowing and resize/crop pipelines. Note: the active dataset below does
    # not use them; they feed only the commented-out variant.
    prep = transforms.Compose([Window(50, 200), Imagify(50, 200)])
    tsfm = A.Compose([
        A.SmallestMaxSize(max_size=256, always_apply=True, p=1),
        A.CenterCrop(256, 256, always_apply=True, p=1.0),
    ])

    #ctds = CTDicomSlices(dcm_list, preprocessing=prep, resize_transform=tsfm,
    #                n_surrounding=0)# , felz_crop=True)

    ctds = CTDicomSlices(dcm_list, n_surrounding=0, mask_is_255=False)
    show_dataset(ctds)
Exemple #5
0
                                  edgecolor='g',
                                  facecolor='none')
            # Outline each tile location on the current axes.
            ax.add_patch(p)

    # Reassemble the tiles into a single puzzle image; take the first item.
    puzzle = ResnetJigsaw.tiles_to_image(tiles, in_channels=1)[0]

    # Second panel: the assembled puzzle in grayscale.
    ax2 = fig.add_subplot(1, 2, 2)
    ax2.imshow(puzzle[0], cmap='gray')

    fig.show()


if __name__ == '__main__':
    # Dataset root; alternative roots left commented for quick switching.
    dataset = Constants.organized_dataset_2  #Constants.organized_dataset_2 #'/mnt/g/thesis/ct_only_cleaned_mini' #Constants.ct_only_cleaned  #
    dcm_list = CTDicomSlices.generate_file_list(
        dataset, dicom_glob='/*/dicoms/*.dcm')  #dicom_glob='/*/*/dicoms/*.dcm')

    # Intensity pipeline: window, convert to image range, then normalize.
    prep = transforms.Compose([
        Window(50, 200),
        Imagify(50, 200),
        Normalize(61.0249, 78.3195),
    ])

    # Jigsaw pretraining dataset: one shuffle per image from 100 permutations.
    ctds = CTDicomSlicesJigsaw(
        dcm_list,
        preprocessing=prep,
        trim_edges=False,
        return_tile_coords=True,
        perm_path=Constants.default_perms,
        n_shuffles_per_image=1,
        num_perms=100,
    )

    #show_jigsaw_dataset(ctds)
Exemple #6
0
def get_datasets(model_dir=None,
                 new_ds_split=True,
                 train_list="train.txt",
                 val_list="val.txt",
                 test_list="test.txt"):
    '''
    Builds the necessary datasets.

    model_dir: where model parameters (and the generated patient split lists)
        are stored; ignored when None.
    new_ds_split: creates a new train/val/test split if True, and loads the
        split named by train_list/val_list/test_list if False.

    Returns a dict with 'train', 'val' and 'test' CTDicomSlices datasets.
    Relies on module-level configuration: dataset_dir, val_frac, test_frac,
    WL, WW, mean, std, img_size, translate, scale, rotate, train_frac,
    in_channels.
    '''
    # NOTE(review): the original docstring described a `dslist_in_pt_dir`
    # parameter that does not exist in the signature; it has been removed.

    # Manage patient splits
    if new_ds_split:
        dsm = DatasetManager.generate_train_val_test(dataset_dir, val_frac,
                                                     test_frac)
        if model_dir is not None:
            dsm.save_lists(model_dir)
    else:
        dsm = DatasetManager.load_train_val_test(dataset_dir, train_list,
                                                 val_list, test_list)

    #preprocess_fn = get_preprocessing_fn(backbone, pretrained=encoder_weights)

    # Intensity pipeline: window, convert to image range, then normalize.
    prep = transforms.Compose(
        [Window(WL, WW), Imagify(WL, WW),
         Normalize(mean, std)])

    # Deterministic resize, applied identically to image and mask targets.
    resize_tsfm = A.Compose([A.Resize(img_size, img_size)],
                            additional_targets={
                                "image1": 'image',
                                "mask1": 'mask'
                            })

    # Random affine jitter + flip; applied to the training set only (val/test
    # below deliberately get just the resize).
    img_mask_tsfm = A.Compose([
        A.ShiftScaleRotate(
            shift_limit=translate, scale_limit=scale, rotate_limit=rotate),
        A.HorizontalFlip()
    ],
                              additional_targets={
                                  "image1": 'image',
                                  "mask1": 'mask'
                              })

    # create ds
    train_dicoms, val_dicoms, test_dicoms = dsm.get_dicoms(
        train_frac=train_frac)

    datasets = {}
    datasets['train'] = CTDicomSlices(train_dicoms,
                                      preprocessing=prep,
                                      resize_transform=resize_tsfm,
                                      img_and_mask_transform=img_mask_tsfm,
                                      n_surrounding=in_channels // 2)
    datasets['val'] = CTDicomSlices(val_dicoms,
                                    preprocessing=prep,
                                    resize_transform=resize_tsfm,
                                    n_surrounding=in_channels // 2)
    datasets['test'] = CTDicomSlices(test_dicoms,
                                     preprocessing=prep,
                                     resize_transform=resize_tsfm,
                                     n_surrounding=in_channels // 2)

    return datasets
Exemple #7
0
    # Integer coordinates are required for indexing into `segments`.
    selected_pixels = selected_pixels.astype('int32')

    # Look up the segment label under each selected pixel coordinate.
    selected_segments = [segments[tuple(sp)] for sp in selected_pixels]

    # One boolean mask per selected segment label.
    pre_mask = [segments == ss for ss in selected_segments]

    # Union of the per-segment masks into a single foreground mask.
    mask = np.logical_or.reduce(pre_mask)

    return segments, mask

# Toggle between the GPU-side and CPU-side augmentation pipelines below.
use_gpu_transforms = False

if __name__ == '__main__':
    only_positive = True
    dataset = '/home/hussam/organized_dataset_2/'
    dcm_list = CTDicomSlices.generate_file_list(dataset)

    # Pick the DataLoader variant matching the transform strategy.
    if use_gpu_transforms:
        ct_dl = gpu_transforms(dcm_list)
    else:
        ct_dl = cpu_transforms(dcm_list)

    for slices, mask, img_path, slice_n in ct_dl:
        # Reorder to channels-first — presumably NHWC -> NCHW; TODO confirm.
        slices = slices.permute(0, 3, 1, 2)
        if use_gpu_transforms:
            # GPU pipeline defers augmentation to here.
            slices, mask = do_inplace_transforms(slices, mask)
            mask = mask[0]

        # Drop the batch dimension (batch_size=1 in both pipelines).
        slices = slices[0]
        img_path = img_path[0]
        slice_n = slice_n[0]