Example #1
    for i in identifiers:
        shutil.copy(join(imagests_source, i + "_C0.nii.gz"),
                    join(imagests, i + '_0000.nii.gz'))
        shutil.copy(join(imagests_source, i + "_DE.nii.gz"),
                    join(imagests, i + '_0001.nii.gz'))
        shutil.copy(join(imagests_source, i + "_T2.nii.gz"),
                    join(imagests, i + '_0002.nii.gz'))

    generate_dataset_json(
        join(out_base, 'dataset.json'),
        imagestr,
        None, ('C0', 'DE', 'T2'), {
            0: 'background',
            1: "left ventricular (LV) blood pool",
            2: "right ventricular blood pool",
            3: "LV normal myocardium",
            4: "LV myocardial edema",
            5: "LV myocardial scars",
        },
        task_name,
        license='see http://www.sdspeople.fudan.edu.cn/zhuangxiahai/0/myops20/index.html',
        dataset_description='see http://www.sdspeople.fudan.edu.cn/zhuangxiahai/0/myops20/index.html',
        dataset_reference='http://www.sdspeople.fudan.edu.cn/zhuangxiahai/0/myops20/index.html',
        dataset_release='0')

    # REMEMBER THAT TEST SET INFERENCE WILL REQUIRE YOU TO CONVERT THE LABELS BACK TO THEIR CONVENTION
    # use convert_labels_back_to_myops for that!
    # man I am such a nice person. Love you guys.
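    # Not part of the original script: a rough sketch of what that back-conversion has to do,
    # i.e. map nnU-Net's consecutive labels back to the MyoPS values. The 500/600/200/1220/2221
    # mapping below is an assumption; use the provided convert_labels_back_to_myops in practice.
    def _convert_one_prediction_back_sketch(in_file, out_file):
        import numpy as np
        import SimpleITK as sitk
        img = sitk.ReadImage(in_file)
        seg = sitk.GetArrayFromImage(img)
        remapped = np.zeros_like(seg, dtype=np.uint16)
        # assumed mapping: nnU-Net label -> MyoPS label
        for nnunet_label, myops_label in {1: 500, 2: 600, 3: 200, 4: 1220, 5: 2221}.items():
            remapped[seg == nnunet_label] = myops_label
        out = sitk.GetImageFromArray(remapped)
        out.CopyInformation(img)  # keep spacing/origin/direction of the prediction
        sitk.WriteImage(out, out_file)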
    labels_dir_ts = join(base, 'testing', 'output')
    images_dir_ts = join(base, 'testing', 'input')
    testing_cases = subfiles(labels_dir_ts, suffix='.png', join=False)
    for ts in testing_cases:
        unique_name = ts[:-4]
        input_segmentation_file = join(labels_dir_ts, ts)
        input_image_file = join(images_dir_ts, ts)

        output_image_file = join(target_imagesTs, unique_name)
        output_seg_file = join(target_labelsTs, unique_name)

        convert_2d_image_to_nifti(input_image_file, output_image_file, is_seg=False)
        convert_2d_image_to_nifti(input_segmentation_file, output_seg_file, is_seg=True,
                                  transform=lambda x: (x == 255).astype(int))

    # finally we can call the utility for generating a dataset.json
    generate_dataset_json(join(target_base, 'dataset.json'), target_imagesTr, target_imagesTs, ('Red', 'Green', 'Blue'),
                          labels={0: 'background', 1: 'street'}, dataset_name=task_name, license='hands off!')
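    # Not part of the original script: a quick optional sanity check of the freshly written file
    # (key names assumed from nnU-Net v1's dataset.json layout).
    import json
    with open(join(target_base, 'dataset.json')) as f:
        _ds = json.load(f)
    print('+ modalities:', _ds.get('modality'))
    print('+ labels:', _ds.get('labels'))
    print('+ numTraining:', _ds.get('numTraining'))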

    """
    once this is completed, you can use the dataset like any other nnU-Net dataset. Note that since this is a 2D
    dataset there is no need to run preprocessing for 3D U-Nets. You should therefore run the 
    `nnUNet_plan_and_preprocess` command like this:
    
    > nnUNet_plan_and_preprocess -t 120 -pl3d None
    
    once that is completed, you can run the trainings as follows:
    > nnUNet_train 2d nnUNetTrainerV2 120 FOLD
    
    (where FOLD is 0, 1, 2, 3 or 4, i.e. one training run per fold of the 5-fold cross-validation)
    
    there is no need to run nnUNet_find_best_configuration because there is only one model to choose from.
    Note that without running nnUNet_find_best_configuration, nnU-Net will not have determined a postprocessing
    for this configuration.
    """
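    # Not part of the original script: a minimal sketch for launching all five folds from Python
    # (assumes the nnU-Net v1 command line entry points are installed and on the PATH).
    def _run_all_folds_2d_sketch(task_id=120):
        import subprocess
        for fold in range(5):
            subprocess.run(['nnUNet_train', '2d', 'nnUNetTrainerV2', str(task_id), str(fold)],
                           check=True)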
Example #3
imagesTr_path = os.path.join(dataset_folder, "imagesTr")
labelsTr_path = os.path.join(dataset_folder, "labelsTr")
imagesTs_path = os.path.join(dataset_folder, "imagesTs")
###
imagesTr_example = utils.get_identifiers_from_covid_GC(imagesTr_path)
print("+ imagesTr: ", imagesTr_example)
print("+ imagesTr: ", len(imagesTr_example))

# df_modality = pd.read_csv(os.path.join(radiomics_folder, "lesion_features-0.csv") , sep=',')

## Dataset conversion params
output_file = os.path.join(dataset_folder, "dataset.json")
imagesTr_dir = os.path.join(dataset_folder, "imagesTr")
imagesTs_dir = os.path.join(dataset_folder, "imagesTs")
modalities = ["CT"]
labels = {0: 'background', 1: 'lesion'}
dataset_name = "Task115_COVID-19"
license = "Hands on"
dataset_description = "Lesion segmentation for covid+"
dataset_reference = "COVID-19-20 - Grand Challenge & MICCAI"
dataset_release = '0.0'

utils.generate_dataset_json(output_file, imagesTr_dir, imagesTs_dir,
                            modalities, labels, dataset_name, license,
                            dataset_description, dataset_reference,
                            dataset_release)

## nnU-Net launcher
## Step-1: nnUNet_plan_and_preprocess -t 115 --verify_dataset_integrity
## Step-2: nnUNet_train 3d_fullres nnUNetTrainerV2 115 0
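## Not part of the original script: optional sanity check (assumes nnU-Net v1's environment
## variables) before launching the two steps above.
import os
for env_var in ("nnUNet_raw_data_base", "nnUNet_preprocessed", "RESULTS_FOLDER"):
    if not os.environ.get(env_var):
        print("+ WARNING: {} is not set; the nnUNet_* commands above need it".format(env_var))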
    # (training cases: the 'Train' folder name and '_seg.nii.gz' suffix below mirror the
    # Validation handling further down and are assumed from the COVID-19-20 download layout)
    train_orig = join(downloaded_data_dir, "Train")
    cases = [i[:-10] for i in subfiles(train_orig, suffix='_ct.nii.gz', join=False)]
    for c in cases:
        data_file = join(train_orig, c + '_ct.nii.gz')
        seg_file = join(train_orig, c + '_seg.nii.gz')
        shutil.copy(data_file, join(target_imagesTr, c + "_0000.nii.gz"))
        shutil.copy(seg_file, join(target_labelsTr, c + '.nii.gz'))

    val_orig = join(downloaded_data_dir, "Validation")
    cases = [i[:-10] for i in subfiles(val_orig, suffix='_ct.nii.gz', join=False)]
    for c in cases:
        data_file = join(val_orig, c + '_ct.nii.gz')

        shutil.copy(data_file, join(target_imagesVal, c + "_0000.nii.gz"))

    generate_dataset_json(
        join(target_base, 'dataset.json'),
        target_imagesTr,
        None,
        ("CT", ),
        {0: 'background', 1: 'covid'},
        task_name,
        dataset_reference='https://covid-segmentation.grand-challenge.org/COVID-19-20/'
    )
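    # Not part of the original script: the copied Validation images can later be segmented with
    # nnU-Net v1's inference CLI, along the lines of
    #   nnUNet_predict -i <target_imagesVal> -o <prediction folder> -t <task id> -m 3d_fullres
    # (folders and model choice here are placeholders, not taken from the original).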

    # performance summary (train set, 5-fold cross-validation)

    # baselines
    # 3d_fullres nnUNetTrainerV2__nnUNetPlans_v2.1                                    0.7441
    # 3d_lowres  nnUNetTrainerV2__nnUNetPlans_v2.1                                    0.745

    # models used for test set prediction
    # 3d_fullres nnUNetTrainerV2_ResencUNet_DA3__nnUNetPlans_FabiansResUNet_v2.1      0.7543
    # 3d_fullres nnUNetTrainerV2_ResencUNet__nnUNetPlans_FabiansResUNet_v2.1          0.7527
    # 3d_lowres  nnUNetTrainerV2_ResencUNet_DA3_BN__nnUNetPlans_FabiansResUNet_v2.1   0.7513
    imagestr = join(out_base, "imagesTr")
    labelstr = join(out_base, "labelsTr")
    maybe_mkdir_p(imagestr)
    maybe_mkdir_p(labelstr)

    case_ids = subdirs(kits_data_dir, prefix='case_', join=False)
    for c in case_ids:
        if isfile(join(kits_data_dir, c, kits_segmentation_filename)):
            shutil_sol.copyfile(
                join(kits_data_dir, c, kits_segmentation_filename),
                join(labelstr, c + '.nii.gz'))
            shutil_sol.copyfile(join(kits_data_dir, c, 'imaging.nii.gz'),
                                join(imagestr, c + '_0000.nii.gz'))

    generate_dataset_json(
        join(out_base, 'dataset.json'),
        imagestr,
        None, ('CT', ), {
            0: 'background',
            1: "kidney",
            2: "tumor",
            3: "cyst",
        },
        task_name,
        license='see https://kits21.kits-challenge.org/participate#download-block',
        dataset_description='see https://kits21.kits-challenge.org/',
        dataset_reference='https://www.sciencedirect.com/science/article/abs/pii/S1361841520301857, '
                          'https://kits21.kits-challenge.org/',
        dataset_release='0')
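    # Not part of the original script: a quick optional check that every copied image has a
    # matching segmentation before preprocessing (subfiles from
    # batchgenerators.utilities.file_and_folder_operations, as used in the other examples).
    image_ids = set(i[:-len('_0000.nii.gz')] for i in subfiles(imagestr, suffix='_0000.nii.gz', join=False))
    label_ids = set(i[:-len('.nii.gz')] for i in subfiles(labelstr, suffix='.nii.gz', join=False))
    assert image_ids == label_ids, 'image/label mismatch: %s' % sorted(image_ids ^ label_ids)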