Example #1
def test_bounding_box(download_data_testing_test_files, train_lst, target_lst,
                      config):
    # Create mask
    mask_coord = [20, 40, 20, 90, 0, 25]
    mx1, mx2, my1, my2, mz1, mz2 = mask_coord
    mask = np.zeros((96, 96, 96))
    mask[mx1:mx2 + 1, my1:my2 + 1, mz1:mz2 + 1] = 1
    coord = imed_obj_detect.get_bounding_boxes(mask)
    assert coord[0] == mask_coord

    loader_params = {
        "data_list": train_lst,
        "dataset_type": "training",
        "requires_undo": False,
        "path_data": [__data_testing_dir__],
        "target_suffix": target_lst,
        "extensions": [".nii.gz"],
        "slice_filter_params": {
            "filter_empty_mask": False,
            "filter_empty_input": True
        },
        "slice_axis": "axial"
    }

    if "Modified3DUNet" in config:
        config['model_params']["name"] = "Modified3DUNet"
        config['model_params'].update(config["Modified3DUNet"])

    bounding_box_dict = {}
    bounding_box_path = os.path.join(PATH_OUTPUT, 'bounding_boxes.json')
    if not os.path.exists(PATH_OUTPUT):
        os.mkdir(PATH_OUTPUT)
    current_dir = os.getcwd()
    sub = train_lst[0]
    contrast = config['contrast_params']['contrast_lst'][0]
    bb_path = os.path.join(current_dir, __data_testing_dir__, sub, "anat",
                           sub + "_" + contrast + ".nii.gz")
    bounding_box_dict[bb_path] = coord
    with open(bounding_box_path, 'w') as fp:
        json.dump(bounding_box_dict, fp, indent=4)

    # Update loader_params with config
    loader_params.update(config)

    bids_df = imed_loader_utils.BidsDataframe(loader_params,
                                              __tmp_dir__,
                                              derivatives=True)

    ds = imed_loader.load_dataset(bids_df, **loader_params)

    handler = ds.handlers if "Modified3DUNet" in config else ds.indexes
    for index in handler:
        seg_pair, _ = index
        if "Modified3DUNet" in config:
            assert seg_pair['input'][0].shape[-3:] == (mx2 - mx1, my2 - my1,
                                                       mz2 - mz1)
        else:
            assert seg_pair['input'][0].shape[-2:] == (mx2 - mx1, my2 - my1)

    shutil.rmtree(PATH_OUTPUT)
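A note on the assertion above: get_bounding_boxes is expected to recover the exact extents used to build the mask. As a rough, self-contained sketch (plain NumPy, not the ivadomed implementation), the bounding box of a binary mask can be derived from the min/max of its nonzero indices:

import numpy as np

def bounding_box_of(mask):
    """Return [x1, x2, y1, y2, z1, z2] covering all nonzero voxels."""
    xs, ys, zs = np.nonzero(mask)
    return [xs.min(), xs.max(), ys.min(), ys.max(), zs.min(), zs.max()]

mask = np.zeros((96, 96, 96))
mask[20:41, 20:91, 0:26] = 1
assert bounding_box_of(mask) == [20, 40, 20, 90, 0, 25]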
Example #2
def test_load_dataset_2d_png(download_data_testing_test_files,
                             loader_parameters, model_parameters,
                             transform_parameters):
    """
    Test to make sure load_dataset runs with 2D PNG files, writes corresponding NIfTI files,
    and binarizes ground-truth values to 0 and 1.
    """
    loader_parameters.update({LoaderParamsKW.MODEL_PARAMS: model_parameters})
    bids_df = BidsDataframe(loader_parameters, __tmp_dir__, derivatives=True)
    data_lst = ['sub-rat3_ses-01_sample-data9_SEM.png']
    ds = imed_loader.load_dataset(
        bids_df, **{
            **loader_parameters,
            **{
                'data_list': data_lst,
                'transforms_params': transform_parameters,
                'dataset_type': 'training'
            }
        })
    fname_png = bids_df.df[bids_df.df['filename'] ==
                           data_lst[0]]['path'].values[0]
    fname_nii = imed_loader_utils.update_filename_to_nifti(fname_png)
    assert Path(fname_nii).exists()
    assert ds[0]['input'].shape == (1, 756, 764)
    assert ds[0]['gt'].shape == (1, 756, 764)
    assert np.unique(ds[0]['gt']).tolist() == [0, 1]
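The **{**loader_parameters, **{...}} idiom used above (and in most examples below) builds a one-off copy of the base parameters with a few keys overridden, leaving the fixture dict untouched. A minimal sketch of the pattern:

base = {'dataset_type': 'training', 'slice_axis': 'axial'}
override = {'dataset_type': 'testing'}
merged = {**base, **override}  # later dicts win on key collisions
assert merged == {'dataset_type': 'testing', 'slice_axis': 'axial'}
assert base['dataset_type'] == 'training'  # the original is not mutated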
Example #3
def test_get_target_filename_list_multiple_raters(loader_parameters,
                                                  model_parameters,
                                                  transform_parameters):
    """
    Test that all target_suffix entries are considered for the target filename when given as a list
    """
    loader_parameters.update({LoaderParamsKW.MODEL_PARAMS: model_parameters})
    bids_df = BidsDataframe(loader_parameters, __tmp_dir__, derivatives=True)
    data_lst = ['sub-rat3_ses-01_sample-data9_SEM.png']
    test_ds = imed_loader.load_dataset(
        bids_df, **{
            **loader_parameters,
            **{
                'data_list': data_lst,
                'transforms_params': transform_parameters,
                'dataset_type': 'training'
            }
        })
    target_filename = test_ds.filename_pairs[0][1]

    assert len(target_filename) == len(
        loader_parameters[LoaderParamsKW.TARGET_SUFFIX])
    assert len(target_filename[0]) == len(
        loader_parameters[LoaderParamsKW.TARGET_SUFFIX][0])
    assert len(target_filename[1]) == len(
        loader_parameters[LoaderParamsKW.TARGET_SUFFIX][1])
Example #4
def test_sampler(download_data_testing_test_files, transforms_dict, train_lst,
                 target_lst, roi_params):
    cuda_available, device = imed_utils.define_device(GPU_ID)

    loader_params = {
        "transforms_params": transforms_dict,
        "data_list": train_lst,
        "dataset_type": "training",
        "requires_undo": False,
        "contrast_params": {
            "contrast_lst": ['T2w'],
            "balance": {}
        },
        "path_data": [__data_testing_dir__],
        "target_suffix": target_lst,
        "extensions": [".nii.gz"],
        "roi_params": roi_params,
        "model_params": {
            "name": "Unet"
        },
        "slice_filter_params": {
            "filter_empty_mask": False,
            "filter_empty_input": True
        },
        "slice_axis": "axial",
        "multichannel": False
    }
    # Get Training dataset
    bids_df = imed_loader_utils.BidsDataframe(loader_params,
                                              __tmp_dir__,
                                              derivatives=True)
    ds_train = imed_loader.load_dataset(bids_df, **loader_params)

    print('\nLoading without sampling')
    train_loader = DataLoader(ds_train,
                              batch_size=BATCH_SIZE,
                              shuffle=True,
                              pin_memory=True,
                              collate_fn=imed_loader_utils.imed_collate,
                              num_workers=0)
    neg_percent, pos_percent = _cmpt_label(train_loader)
    assert abs(neg_percent - pos_percent) > 20

    print('\nLoading with sampling')
    train_loader_balanced = DataLoader(
        ds_train,
        batch_size=BATCH_SIZE,
        sampler=imed_loader_utils.BalancedSampler(ds_train),
        shuffle=False,
        pin_memory=True,
        collate_fn=imed_loader_utils.imed_collate,
        num_workers=0)

    neg_percent_bal, pos_percent_bal = _cmpt_label(train_loader_balanced)
    # Check that the loader is more balanced. The actual distribution comes from a probabilistic
    # model, so it is hard to get close to 50 % when there are only 16 slices and 87.5 % of them
    # belong to one class (positive samples).
    assert abs(neg_percent_bal - pos_percent_bal) < abs(neg_percent -
                                                        pos_percent)
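The helper _cmpt_label is not shown on this page. A plausible sketch (hypothetical, not the actual helper) returns the negative/positive percentages by checking whether each ground-truth sample contains any foreground; _cmpt_slice in later examples likely plays the same role with raw counts:

def _cmpt_label(loader):
    # Hypothetical: percentage of empty (negative) vs non-empty (positive) GT samples
    cmpt_neg, cmpt_pos = 0, 0
    for batch in loader:
        for gt in batch["gt"]:
            if gt.any():
                cmpt_pos += 1
            else:
                cmpt_neg += 1
    total = cmpt_neg + cmpt_pos
    return 100 * cmpt_neg / total, 100 * cmpt_pos / total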
Example #5
def test_bounding_box(train_lst, target_lst, config):
    # Create mask
    mask_coord = [20, 40, 20, 90, 0, 25]
    mx1, mx2, my1, my2, mz1, mz2 = mask_coord
    mask = np.zeros((96, 96, 96))
    mask[mx1:mx2 + 1, my1:my2 + 1, mz1:mz2 + 1] = 1
    coord = imed_obj_detect.get_bounding_boxes(mask)
    assert coord[0] == mask_coord

    loader_params = {
        "data_list": train_lst,
        "dataset_type": "training",
        "requires_undo": False,
        "bids_path": PATH_BIDS,
        "target_suffix": target_lst,
        "slice_filter_params": {
            "filter_empty_mask": False,
            "filter_empty_input": True
        },
        "slice_axis": "axial"
    }

    if "UNet3D" in config:
        config['model_params']["name"] = "UNet3D"
        config['model_params'].update(config["UNet3D"])

    bounding_box_dict = {}
    bounding_box_path = os.path.join(LOG_DIR, 'bounding_boxes.json')
    if not os.path.exists(LOG_DIR):
        os.mkdir(LOG_DIR)
    current_dir = os.getcwd()
    sub = train_lst[0]
    contrast = config['contrast_params']['contrast_lst'][0]
    bb_path = os.path.join(current_dir, PATH_BIDS, sub, "anat",
                           sub + "_" + contrast + ".nii.gz")
    bounding_box_dict[bb_path] = coord
    with open(bounding_box_path, 'w') as fp:
        json.dump(bounding_box_dict, fp, indent=4)

    # Update loader_params with config
    loader_params.update(config)
    ds = imed_loader.load_dataset(**loader_params)

    handler = ds.handlers if "UNet3D" in config else ds.indexes
    for index in handler:
        seg_pair, _ = index
        if "UNet3D" in config:
            assert seg_pair['input'][0].shape[-3:] == (mx2 - mx1, my2 - my1,
                                                       mz2 - mz1)
        else:
            assert seg_pair['input'][0].shape[-2:] == (mx2 - mx1, my2 - my1)

    shutil.rmtree(LOG_DIR)
Example #6
def get_dataset(loader_params, data_lst, transform_params, cuda_available,
                device, ds_type):
    ds = imed_loader.load_dataset(**{
        **loader_params,
        **{
            'data_list': data_lst,
            'transforms_params': transform_params,
            'dataset_type': ds_type
        }
    },
                                  device=device,
                                  cuda_available=cuda_available)
    return ds
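A hypothetical call, assuming a loader_params dict and subject/transform fixtures like the ones built in the other examples (all names here are placeholders):

cuda_available, device = imed_utils.define_device(GPU_ID)
ds_train = get_dataset(loader_params, train_lst, transform_train_params,
                       cuda_available, device, 'training')
print('Loaded {} samples'.format(len(ds_train)))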
Example #7
def test_slice_filter(download_data_testing_test_files, transforms_dict,
                      train_lst, target_lst, roi_params, slice_filter_params):
    if "ROICrop" in transforms_dict and roi_params["suffix"] is None:
        return

    cuda_available, device = imed_utils.define_device(GPU_ID)

    loader_params = {
        "transforms_params": transforms_dict,
        "data_list": train_lst,
        "dataset_type": "training",
        "requires_undo": False,
        "contrast_params": {
            "contrast_lst": ['T2w'],
            "balance": {}
        },
        "path_data": [__data_testing_dir__],
        "target_suffix": target_lst,
        "extensions": [".nii.gz"],
        "roi_params": roi_params,
        "model_params": {
            "name": "Unet"
        },
        "slice_filter_params": slice_filter_params,
        "patch_filter_params": {
            "filter_empty_mask": False,
            "filter_empty_input": False
        },
        "slice_axis": "axial",
        "multichannel": False
    }
    # Get Training dataset
    bids_df = BidsDataframe(loader_params, __tmp_dir__, derivatives=True)
    ds_train = imed_loader.load_dataset(bids_df, **loader_params)

    logger.info(f"\tNumber of loaded slices: {len(ds_train)}")

    train_loader = DataLoader(ds_train,
                              batch_size=BATCH_SIZE,
                              shuffle=True,
                              pin_memory=True,
                              collate_fn=imed_loader_utils.imed_collate,
                              num_workers=0)
    cmpt_neg, cmpt_pos = _cmpt_slice(train_loader)
    if slice_filter_params["filter_empty_mask"]:
        assert cmpt_neg == 0
        assert cmpt_pos != 0
    else:
        # Verify that some negative slices remain (they would have been removed if the filter were on)
        assert cmpt_neg != 0 and cmpt_pos != 0
    logger.info(f"\tNumber of Neg/Pos slices in GT: {cmpt_neg/cmpt_pos}")
Example #8
def test_load_dataset_2d_png(download_data_testing_test_files,
                             loader_parameters, model_parameters, transform_parameters):
    """
    Test to make sure load_dataset runs with 2D PNG data.
    """
    loader_parameters.update({"model_params": model_parameters})
    bids_df = imed_loader_utils.BidsDataframe(loader_parameters, __tmp_dir__, derivatives=True)
    data_lst = ['sub-rat3_ses-01_sample-data9_SEM.png']
    ds = imed_loader.load_dataset(bids_df,
                                  **{**loader_parameters, **{'data_list': data_lst,
                                                             'transforms_params': transform_parameters,
                                                             'dataset_type': 'training'}})
    assert ds[0]['input'].shape == (1, 756, 764)
    assert ds[0]['gt'].shape == (1, 756, 764)
Example #9
def test_get_target_filename_list(loader_parameters, model_parameters, transform_parameters):
    """
    Test that all target_suffix entries are considered for the target filename when given as a list
    """
    loader_parameters.update({"model_params": model_parameters})
    bids_df = imed_loader_utils.BidsDataframe(loader_parameters, __tmp_dir__, derivatives=True)
    data_lst = ['sub-rat3_ses-01_sample-data9_SEM.png']
    test_ds = imed_loader.load_dataset(bids_df,
                                  **{**loader_parameters, **{'data_list': data_lst,
                                                             'transforms_params': transform_parameters,
                                                             'dataset_type': 'training'}})

    target_filename = test_ds.filename_pairs[0][1]
    
    assert len(target_filename) == len(loader_parameters["target_suffix"])
Example #10
def test_slice_filter(transforms_dict, train_lst, target_lst, roi_params,
                      slice_filter_params):
    if "ROICrop" in transforms_dict and roi_params["suffix"] == None:
        return

    cuda_available, device = imed_utils.define_device(GPU_NUMBER)

    loader_params = {
        "transforms_params": transforms_dict,
        "data_list": train_lst,
        "dataset_type": "training",
        "requires_undo": False,
        "contrast_params": {
            "contrast_lst": ['T2w'],
            "balance": {}
        },
        "bids_path": PATH_BIDS,
        "target_suffix": target_lst,
        "roi_params": roi_params,
        "model_params": {
            "name": "Unet"
        },
        "slice_filter_params": slice_filter_params,
        "slice_axis": "axial",
        "multichannel": False
    }
    # Get Training dataset
    ds_train = imed_loader.load_dataset(**loader_params)

    print('\tNumber of loaded slices: {}'.format(len(ds_train)))

    train_loader = DataLoader(ds_train,
                              batch_size=BATCH_SIZE,
                              shuffle=True,
                              pin_memory=True,
                              collate_fn=imed_loader_utils.imed_collate,
                              num_workers=0)
    print('\tNumber of Neg/Pos slices in GT.')
    cmpt_neg, cmpt_pos = _cmpt_slice(train_loader)
    if slice_filter_params["filter_empty_mask"]:
        assert cmpt_neg == 0
        assert cmpt_pos != 0
    else:
        # Verify that some negative slices remain (they would have been removed if the filter were on)
        assert cmpt_neg != 0 and cmpt_pos != 0
Example #11
def test_2d_patches_and_resampling(download_data_testing_test_files,
                                   loader_parameters, model_parameters,
                                   transform_parameters):
    """
    Test that 2d patching is done properly.
    Test that microscopy pixelsize and resampling are applied on the right dimensions.
    """
    loader_parameters.update({LoaderParamsKW.MODEL_PARAMS: model_parameters})
    bids_df = BidsDataframe(loader_parameters, __tmp_dir__, derivatives=True)
    data_lst = ['sub-rat3_ses-01_sample-data9_SEM.png']
    ds = imed_loader.load_dataset(
        bids_df, **{
            **loader_parameters,
            **{
                'data_list': data_lst,
                'transforms_params': transform_parameters,
                'dataset_type': 'training'
            }
        })
    assert ds.is_2d_patch
    assert ds[0]['input'].shape == (1, 256, 128)
    assert ds[0]['input_metadata'][0].metadata[MetadataKW.INDEX_SHAPE] == (
        1512, 382)
    assert len(ds) == 28
Example #12
def test_patch_filter(download_data_testing_test_files, transforms_dict,
                      train_lst, target_lst, patch_filter_params,
                      dataset_type):

    cuda_available, device = imed_utils.define_device(GPU_ID)

    loader_params = {
        "transforms_params": transforms_dict,
        "data_list": train_lst,
        "dataset_type": dataset_type,
        "requires_undo": False,
        "contrast_params": {
            "contrast_lst": ['SEM'],
            "balance": {}
        },
        "path_data": [os.path.join(__data_testing_dir__, "microscopy_png")],
        "bids_config": f"{path_repo_root}/ivadomed/config/config_bids.json",
        "target_suffix": target_lst,
        "extensions": [".png"],
        "roi_params": {
            "suffix": None,
            "slice_filter_roi": None
        },
        "model_params": {
            "name": "Unet",
            "length_2D": [32, 32],
            "stride_2D": [32, 32]
        },
        "slice_filter_params": {
            "filter_empty_mask": False,
            "filter_empty_input": False
        },
        "patch_filter_params": patch_filter_params,
        "slice_axis": "axial",
        "multichannel": False
    }
    # Get Training dataset
    bids_df = BidsDataframe(loader_params, __tmp_dir__, derivatives=True)
    ds = imed_loader.load_dataset(bids_df, **loader_params)

    logger.info(f"\tNumber of loaded patches: {len(ds)}")

    loader = DataLoader(ds,
                        batch_size=BATCH_SIZE,
                        shuffle=True,
                        pin_memory=True,
                        collate_fn=imed_loader_utils.imed_collate,
                        num_workers=0)
    logger.info("\tNumber of Neg/Pos patches in GT.")
    cmpt_neg, cmpt_pos = _cmpt_slice(loader)
    if patch_filter_params["filter_empty_mask"]:
        if dataset_type == "testing":
            # Filters on patches are not applied at testing time
            assert cmpt_neg + cmpt_pos == len(ds)
        else:
            # Filters on patches are applied at training time
            assert cmpt_neg == 0
            assert cmpt_pos != 0
    else:
        # Verify that some negative patches remain (they would have been removed if the filter were on)
        assert cmpt_neg != 0 and cmpt_pos != 0
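With length_2D and stride_2D both [32, 32], the loader tiles each slice into non-overlapping 32x32 patches. A back-of-the-envelope helper (an assumption about the tiling scheme, not ivadomed code) for the number of patch positions needed to cover an image:

import math

def count_patches(height, width, length=(32, 32), stride=(32, 32)):
    # One patch at offset 0, plus one per stride step until the edge is covered
    n_h = math.ceil(max(height - length[0], 0) / stride[0]) + 1
    n_w = math.ceil(max(width - length[1], 0) / stride[1]) + 1
    return n_h * n_w

print(count_patches(100, 100))  # 4 * 4 = 16 positions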
Example #13
def test_unet_time(train_lst, target_lst, config):
    cuda_available, device = imed_utils.define_device(GPU_NUMBER)

    loader_params = {
        "data_list": train_lst,
        "dataset_type": "training",
        "requires_undo": False,
        "bids_path": PATH_BIDS,
        "target_suffix": target_lst,
        "slice_filter_params": {
            "filter_empty_mask": False,
            "filter_empty_input": True
        },
        "slice_axis": "axial"
    }
    # Update loader_params with config
    loader_params.update(config)
    # Get Training dataset
    ds_train = imed_loader.load_dataset(**loader_params)

    # Loader
    train_loader = DataLoader(ds_train,
                              batch_size=1 if config["model_params"]["name"]
                              == "UNet3D" else BATCH_SIZE,
                              shuffle=True,
                              pin_memory=True,
                              collate_fn=imed_loader_utils.imed_collate,
                              num_workers=1)

    # MODEL
    model_params = loader_params["model_params"]
    model_params.update(MODEL_DEFAULT)
    # Get in_channel from contrast_lst
    if loader_params["multichannel"]:
        model_params["in_channel"] = len(
            loader_params["contrast_params"]["contrast_lst"])
    else:
        model_params["in_channel"] = 1
    # Get out_channel from target_suffix
    model_params["out_channel"] = len(loader_params["target_suffix"])
    model_class = getattr(imed_models, model_params["name"])
    model = model_class(**model_params)

    print("Training {}".format(model_params["name"]))
    if cuda_available:
        model.cuda()

    step_scheduler_batch = False
    # TODO: Add optim in pytest
    optimizer = optim.Adam(model.parameters(), lr=INIT_LR)
    scheduler = optim.lr_scheduler.CosineAnnealingLR(optimizer, N_EPOCHS)

    # TODO: add to pytest
    loss_fct = imed_losses.DiceLoss()

    load_lst, pred_lst, opt_lst, schedul_lst, init_lst, gen_lst = [], [], [], [], [], []
    for epoch in tqdm(range(1, N_EPOCHS + 1), desc="Training"):
        start_time = time.time()

        start_init = time.time()

        model.train()

        tot_init = time.time() - start_init
        init_lst.append(tot_init)

        num_steps = 0
        start_gen = 0
        for i, batch in enumerate(train_loader):
            if i > 0:
                tot_gen = time.time() - start_gen
                gen_lst.append(tot_gen)

            start_load = time.time()
            input_samples = imed_utils.cuda(batch["input"], cuda_available)
            gt_samples = imed_utils.cuda(batch["gt"],
                                         cuda_available,
                                         non_blocking=True)

            tot_load = time.time() - start_load
            load_lst.append(tot_load)

            start_pred = time.time()
            preds = model(input_samples)
            tot_pred = time.time() - start_pred
            pred_lst.append(tot_pred)

            start_opt = time.time()
            loss = loss_fct(preds, gt_samples)

            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            if step_scheduler_batch:
                scheduler.step()

            num_steps += 1
            tot_opt = time.time() - start_opt
            opt_lst.append(tot_opt)

            start_gen = time.time()

        start_schedul = time.time()
        if not step_scheduler_batch:
            scheduler.step()
        tot_schedul = time.time() - start_schedul
        schedul_lst.append(tot_schedul)

        end_time = time.time()
        total_time = end_time - start_time
        tqdm.write("Epoch {} took {:.2f} seconds.".format(epoch, total_time))

    print('Mean SD init {} -- {}'.format(np.mean(init_lst), np.std(init_lst)))
    print('Mean SD load {} -- {}'.format(np.mean(load_lst), np.std(load_lst)))
    print('Mean SD pred {} -- {}'.format(np.mean(pred_lst), np.std(pred_lst)))
    print('Mean SD opt {} -- {}'.format(np.mean(opt_lst), np.std(opt_lst)))
    print('Mean SD gen {} -- {}'.format(np.mean(gen_lst), np.std(gen_lst)))
    print('Mean SD scheduler {} -- {}'.format(np.mean(schedul_lst),
                                              np.std(schedul_lst)))
Example #14
def test_microscopy_pixelsize(download_data_testing_test_files,
                              loader_parameters, model_parameters):
    """
    Test that PixelSize and PixelSizeUnits microscopy metadata
    are handled properly for PixelSizeUnits: "mm", "um" and "nm"
    """
    loader_parameters.update({LoaderParamsKW.MODEL_PARAMS: model_parameters})
    bids_df = BidsDataframe(loader_parameters, __tmp_dir__, derivatives=True)

    # PixelSizeUnits: "mm"
    data_lst = ['sub-rat2_sample-data5_SEM.png']
    transform_parameters = {
        TransformationKW.RESAMPLE: {
            "wspace": 0.000093,
            "hspace": 0.000093
        }
    }
    ds = imed_loader.load_dataset(
        bids_df, **{
            **loader_parameters,
            **{
                'data_list': data_lst,
                'transforms_params': transform_parameters,
                'dataset_type': 'training'
            }
        })
    assert ds[0]['input'].shape == (1, 725, 725)

    # PixelSizeUnits: "um"
    data_lst = ['sub-rat3_ses-02_sample-data11_run-1_SEM.png']
    transform_parameters = {
        TransformationKW.RESAMPLE: {
            "wspace": 0.0001,
            "hspace": 0.0001
        }
    }
    ds = imed_loader.load_dataset(
        bids_df, **{
            **loader_parameters,
            **{
                'data_list': data_lst,
                'transforms_params': transform_parameters,
                'dataset_type': 'training'
            }
        })
    assert ds[0]['input'].shape == (1, 839, 769)

    # PixelSizeUnits: "nm"
    data_lst = ['sub-rat3_ses-02_sample-data10_SEM.png']
    transform_parameters = {
        TransformationKW.RESAMPLE: {
            "wspace": 0.0001,
            "hspace": 0.0001
        }
    }
    ds = imed_loader.load_dataset(
        bids_df, **{
            **loader_parameters,
            **{
                'data_list': data_lst,
                'transforms_params': transform_parameters,
                'dataset_type': 'training'
            }
        })
    assert ds[0]['input'].shape == (1, 758, 737)
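The three blocks above differ only in the PixelSizeUnits of the source metadata; the loader is expected to bring them to a common scale before resampling. A hedged sketch of such a conversion (assuming the target unit is millimeters, as suggested by the small wspace/hspace values):

import math

_TO_MM = {"mm": 1.0, "um": 1e-3, "nm": 1e-6}

def pixel_size_in_mm(pixel_size, units):
    """Convert a PixelSize value to millimeters, e.g. 100 nm -> 0.0001 mm."""
    return pixel_size * _TO_MM[units]

assert math.isclose(pixel_size_in_mm(100, "nm"), 0.0001)
assert math.isclose(pixel_size_in_mm(0.1, "um"), 0.0001)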
Example #15
def test_inference_2d_microscopy(download_data_testing_test_files, transforms_dict, test_lst, target_lst, roi_params,
        testing_params):
    """
    This test checks if the number of NifTI predictions equals the number of test subjects on 2d microscopy data.
    Used to catch a bug where the last slice of the last volume wasn't appended to the prediction
    (see: https://github.com/ivadomed/ivadomed/issues/823)
    Also tests the conversions to PNG predictions when source files are not Nifti and checks if the number of PNG
    predictions is 2x the number of test subjects (2-class model, outputs 1 PNG per class per subject).
    """
    cuda_available, device = imed_utils.define_device(GPU_ID)

    model_params = {"name": "Unet", "is_2d": True, "out_channel": 3}
    loader_params = {
        "transforms_params": transforms_dict,
        "data_list": test_lst,
        "dataset_type": "testing",
        "requires_undo": True,
        "contrast_params": {"contrast_lst": ['SEM'], "balance": {}},
        "path_data": [str(Path(__data_testing_dir__, "microscopy_png"))],
        "bids_config": f"{path_repo_root}/ivadomed/config/config_bids.json",
        "target_suffix": target_lst,
        "extensions": [".png"],
        "roi_params": roi_params,
        "slice_filter_params": {"filter_empty_mask": False, "filter_empty_input": True},
        "patch_filter_params": {"filter_empty_mask": False, "filter_empty_input": False},
        "slice_axis": SLICE_AXIS,
        "multichannel": False
    }
    loader_params.update({"model_params": model_params})

    bids_df = BidsDataframe(loader_params, __tmp_dir__, derivatives=True)

    # Get Testing dataset
    ds_test = imed_loader.load_dataset(bids_df, **loader_params)
    test_loader = DataLoader(ds_test, batch_size=BATCH_SIZE,
                             shuffle=False, pin_memory=True,
                             collate_fn=imed_loader_utils.imed_collate,
                             num_workers=0)

    # Undo transform
    val_undo_transform = imed_transforms.UndoCompose(imed_transforms.Compose(transforms_dict))

    # Update testing_params
    testing_params.update({
        "slice_axis": loader_params["slice_axis"],
        "target_suffix": loader_params["target_suffix"],
        "undo_transforms": val_undo_transform
    })

    # Model
    model = imed_models.Unet(out_channel=model_params['out_channel'])

    if cuda_available:
        model.cuda()
    model.eval()

    if not __output_dir__.is_dir():
        __output_dir__.mkdir(parents=True, exist_ok=True)

    preds_npy, gt_npy = imed_testing.run_inference(test_loader=test_loader,
                                                   model=model,
                                                   model_params=model_params,
                                                   testing_params=testing_params,
                                                   ofolder=str(__output_dir__),
                                                   cuda_available=cuda_available)

    assert len([x for x in __output_dir__.iterdir() if x.name.endswith(".nii.gz")]) == len(test_lst)
    assert len([x for x in __output_dir__.iterdir() if x.name.endswith(".png")]) == 2*len(test_lst)
Example #16
def run_command(context, n_gif=0, thr_increment=None, resume_training=False):
    """Run main command.

    This function is central in the ivadomed project as training / testing / evaluation commands
    are run via this function. All the process parameters are defined in the config.

    Args:
        context (dict): Dictionary containing all parameters that are needed for a given process. See
            :doc:`configuration_file` for more details.
        n_gif (int): Generates a GIF during training if larger than zero, one frame per epoch for a given slice. The
            parameter indicates the number of 2D slices used to generate GIFs, one GIF per slice. A GIF shows
            predictions of a given slice from the validation sub-dataset. They are saved within the output path.
        thr_increment (float): A threshold analysis is performed at the end of the training using the trained model and
            the training + validation sub-dataset to find the optimal binarization threshold. The specified value
            indicates the increment between 0 and 1 used during the ROC analysis (e.g. 0.1).
        resume_training (bool): Load a saved model ("checkpoint.pth.tar" in the output directory
            specified with the flag "--path-output" or via the config file "output_path" argument)
            to resume training. This training state is saved every time a new best model is saved
            in the output directory.

    Returns:
        float or pandas.DataFrame or None:
            * If "train" command: Returns floats: best loss score for both training and validation.
            * If "test" command: Returns a pandas Dataframe: of metrics computed for each subject of
              the testing sub-dataset and return the prediction metrics before evaluation.
            * If "segment" command: No return value.

    """
    command = copy.deepcopy(context["command"])
    path_output = set_output_path(context)

    # Create a log with the version of the Ivadomed software and the version of the Annexed dataset (if present)
    create_dataset_and_ivadomed_version_log(context)

    cuda_available, device = imed_utils.define_device(context['gpu_ids'][0])

    # BACKWARDS COMPATIBILITY: If bids_path is string, assign to list - Do this here so it propagates to all functions
    context['loader_parameters']['path_data'] = imed_utils.format_path_data(
        context['loader_parameters']['path_data'])

    # Loader params
    loader_params = set_loader_params(context, command == "train")

    # Get transforms for each subdataset
    transform_train_params, transform_valid_params, transform_test_params = \
        imed_transforms.get_subdatasets_transforms(context["transformation"])

    # MODEL PARAMETERS
    model_params, loader_params = set_model_params(context, loader_params)

    if command == 'segment':
        run_segment_command(context, model_params)
        return

    # Get subject lists. "segment" command uses all participants of data path, hence no need to split
    train_lst, valid_lst, test_lst = imed_loader_utils.get_subdatasets_subjects_list(
        context["split_dataset"], context['loader_parameters']['path_data'],
        path_output, context["loader_parameters"]['subject_selection'])
    # TESTING PARAMS
    # Aleatoric uncertainty
    if context['uncertainty'][
            'aleatoric'] and context['uncertainty']['n_it'] > 0:
        transformation_dict = transform_train_params
    else:
        transformation_dict = transform_test_params
    undo_transforms = imed_transforms.UndoCompose(
        imed_transforms.Compose(transformation_dict, requires_undo=True))
    testing_params = copy.deepcopy(context["training_parameters"])
    testing_params.update({'uncertainty': context["uncertainty"]})
    testing_params.update({
        'target_suffix': loader_params["target_suffix"],
        'undo_transforms': undo_transforms,
        'slice_axis': loader_params['slice_axis']
    })

    if command == "train":
        imed_utils.display_selected_transfoms(transform_train_params,
                                              dataset_type=["training"])
        imed_utils.display_selected_transfoms(transform_valid_params,
                                              dataset_type=["validation"])
    elif command == "test":
        imed_utils.display_selected_transfoms(transformation_dict,
                                              dataset_type=["testing"])

    # Check if multiple raters
    check_multiple_raters(command != "train", loader_params)

    if command == 'train':
        # Get Validation dataset
        ds_valid = get_dataset(loader_params, valid_lst,
                               transform_valid_params, cuda_available, device,
                               'validation')

        # Get Training dataset
        ds_train = get_dataset(loader_params, train_lst,
                               transform_train_params, cuda_available, device,
                               'training')
        metric_fns = imed_metrics.get_metric_fns(ds_train.task)

        # If FiLM, normalize data
        if 'film_layers' in model_params and any(model_params['film_layers']):
            model_params, ds_train, ds_valid, train_onehotencoder = \
                film_normalize_data(context, model_params, ds_train, ds_valid, path_output)
        else:
            train_onehotencoder = None

        # Model directory
        create_path_model(context, model_params, ds_train, path_output,
                          train_onehotencoder)

        save_config_file(context, path_output)

        # RUN TRAINING
        best_training_dice, best_training_loss, best_validation_dice, best_validation_loss = imed_training.train(
            model_params=model_params,
            dataset_train=ds_train,
            dataset_val=ds_valid,
            training_params=context["training_parameters"],
            path_output=path_output,
            device=device,
            cuda_available=cuda_available,
            metric_fns=metric_fns,
            n_gif=n_gif,
            resume_training=resume_training,
            debugging=context["debugging"])

    if thr_increment:
        # LOAD DATASET
        if command != 'train':  # If command == train, then ds_valid is already loaded
            # Get Validation dataset
            ds_valid = get_dataset(loader_params, valid_lst,
                                   transform_valid_params, cuda_available,
                                   device, 'validation')
        # Get Training dataset with no Data Augmentation
        ds_train = get_dataset(loader_params, train_lst,
                               transform_valid_params, cuda_available, device,
                               'training')

        # Choice of optimisation metric
        metric = "recall_specificity" if model_params[
            "name"] in imed_utils.CLASSIFIER_LIST else "dice"
        # Model path
        model_path = os.path.join(path_output, "best_model.pt")
        # Run analysis
        thr = imed_testing.threshold_analysis(model_path=model_path,
                                              ds_lst=[ds_train, ds_valid],
                                              model_params=model_params,
                                              testing_params=testing_params,
                                              metric=metric,
                                              increment=thr_increment,
                                              fname_out=os.path.join(
                                                  path_output, "roc.png"),
                                              cuda_available=cuda_available)

        # Update threshold in config file
        context["postprocessing"]["binarize_prediction"] = {"thr": thr}
        save_config_file(context, path_output)

    if command == 'train':
        return best_training_dice, best_training_loss, best_validation_dice, best_validation_loss

    if command == 'test':
        # LOAD DATASET
        ds_test = imed_loader.load_dataset(**{
            **loader_params,
            **{
                'data_list': test_lst,
                'transforms_params': transformation_dict,
                'dataset_type': 'testing',
                'requires_undo': True
            }
        },
                                           device=device,
                                           cuda_available=cuda_available)

        metric_fns = imed_metrics.get_metric_fns(ds_test.task)

        if 'film_layers' in model_params and any(model_params['film_layers']):
            ds_test, model_params = update_film_model_params(
                context, ds_test, model_params, path_output)

        # RUN INFERENCE
        pred_metrics = imed_testing.test(
            model_params=model_params,
            dataset_test=ds_test,
            testing_params=testing_params,
            path_output=path_output,
            device=device,
            cuda_available=cuda_available,
            metric_fns=metric_fns,
            postprocessing=context['postprocessing'])

        # RUN EVALUATION
        df_results = imed_evaluation.evaluate(
            path_data=loader_params['path_data'],
            path_output=path_output,
            target_suffix=loader_params["target_suffix"],
            eval_params=context["evaluation_parameters"])
        return df_results, pred_metrics
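A hypothetical invocation, assuming a JSON config file containing the keys run_command reads ("command", "gpu_ids", "loader_parameters", "split_dataset", "transformation", "uncertainty", "training_parameters", "postprocessing", ...):

import json

with open("config.json") as f:
    context = json.load(f)

context["command"] = "train"
best_train_dice, best_train_loss, best_val_dice, best_val_loss = run_command(
    context, n_gif=0, thr_increment=None, resume_training=False)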
Example #17
def test_inference_target_suffix(download_data_testing_test_files, transforms_dict, test_lst, target_lst, roi_params,
        testing_params):
    """
    This test checks if the filename(s) of the prediction(s) saved as NifTI file(s) in the pred_masks
    dir conform to the target_suffix or not. Thus, independent of underscore(s) in the target_suffix. As a result,
    _seg-axon-manual or _seg-axon_manual should yield the same filename(s).
    (c.f: https://github.com/ivadomed/ivadomed/issues/1135)
    """
    cuda_available, device = imed_utils.define_device(GPU_ID)

    model_params = {"name": "Unet", "is_2d": True, "out_channel": 3}
    loader_params = {
        "transforms_params": transforms_dict,
        "data_list": test_lst,
        "dataset_type": "testing",
        "requires_undo": True,
        "contrast_params": {"contrast_lst": ['SEM'], "balance": {}},
        "path_data": [str(Path(__data_testing_dir__, "microscopy_png"))],
        "bids_config": f"{path_repo_root}/ivadomed/config/config_bids.json",
        "target_suffix": target_lst,
        "extensions": [".png"],
        "roi_params": roi_params,
        "slice_filter_params": {"filter_empty_mask": False, "filter_empty_input": True},
        "patch_filter_params": {"filter_empty_mask": False, "filter_empty_input": False},
        "slice_axis": SLICE_AXIS,
        "multichannel": False
    }
    loader_params.update({"model_params": model_params})

    # Restructure the dataset
    gt_path = f'{loader_params["path_data"][0]}/derivatives/labels/'
    for file_path in Path(gt_path).rglob('*.png'):
        src_filename = file_path.resolve()
        dst_filename = '_'.join(str(src_filename).rsplit('-', 1))
        src_filename.rename(Path(dst_filename))

    bids_df = BidsDataframe(loader_params, __tmp_dir__, derivatives=True)

    ds_test = imed_loader.load_dataset(bids_df, **loader_params)
    test_loader = DataLoader(ds_test, batch_size=BATCH_SIZE,
                             shuffle=False, pin_memory=True,
                             collate_fn=imed_loader_utils.imed_collate,
                             num_workers=0)

    # Undo transform
    val_undo_transform = imed_transforms.UndoCompose(imed_transforms.Compose(transforms_dict))

    # Update testing_params
    testing_params.update({
        "slice_axis": loader_params["slice_axis"],
        "target_suffix": loader_params["target_suffix"],
        "undo_transforms": val_undo_transform
    })

    # Model
    model = imed_models.Unet(out_channel=model_params['out_channel'])

    if cuda_available:
        model.cuda()
    model.eval()

    if not __output_dir__.is_dir():
        __output_dir__.mkdir(parents=True, exist_ok=True)

    preds_npy, gt_npy = imed_testing.run_inference(test_loader=test_loader,
                                                   model=model,
                                                   model_params=model_params,
                                                   testing_params=testing_params,
                                                   ofolder=str(__output_dir__),
                                                   cuda_available=cuda_available)

    for x in __output_dir__.iterdir():
        if x.name.endswith('_pred.nii.gz'):
            assert x.name.rsplit('_', 1)[0].endswith(loader_params['contrast_params']['contrast_lst'][-1]), (
                'Incompatible filename(s) of the prediction(s) saved as NIfTI file(s)!'
            )
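The restructuring loop above relies on '_'.join(s.rsplit('-', 1)) replacing only the last hyphen of each label filename with an underscore, so a _seg-axon-manual suffix becomes _seg-axon_manual:

name = "sub-rat3_ses-01_sample-data9_SEM_seg-axon-manual.png"
renamed = '_'.join(name.rsplit('-', 1))
assert renamed == "sub-rat3_ses-01_sample-data9_SEM_seg-axon_manual.png"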
Example #18
def test_unet_time(download_data_testing_test_files, train_lst, target_lst, config):
    cuda_available, device = imed_utils.define_device(GPU_ID)

    loader_params = {
        "data_list": train_lst,
        "dataset_type": "training",
        "requires_undo": False,
        "path_data": [__data_testing_dir__],
        "target_suffix": target_lst,
        "extensions": [".nii.gz"],
        "slice_filter_params": {"filter_empty_mask": False, "filter_empty_input": True},
        "patch_filter_params": {"filter_empty_mask": False, "filter_empty_input": False},
        "slice_axis": "axial"
    }
    # Update loader_params with config
    loader_params.update(config)
    # Get Training dataset
    bids_df = BidsDataframe(loader_params, __tmp_dir__, derivatives=True)
    ds_train = imed_loader.load_dataset(bids_df, **loader_params)

    # Loader
    train_loader = DataLoader(ds_train,
                              batch_size=1 if config["model_params"]["name"] == "Modified3DUNet" else BATCH_SIZE,
                              shuffle=True, pin_memory=True,
                              collate_fn=imed_loader_utils.imed_collate,
                              num_workers=1)

    # MODEL
    model_params = loader_params["model_params"]
    model_params.update(MODEL_DEFAULT)
    # Get in_channel from contrast_lst
    if loader_params["multichannel"]:
        model_params["in_channel"] = len(loader_params["contrast_params"]["contrast_lst"])
    else:
        model_params["in_channel"] = 1
    # Get out_channel from target_suffix
    model_params["out_channel"] = len(loader_params["target_suffix"])
    model_class = getattr(imed_models, model_params["name"])
    model = model_class(**model_params)

    logger.debug(f"Training {model_params['name']}")
    if cuda_available:
        model.cuda()

    step_scheduler_batch = False
    # TODO: Add optim in pytest
    optimizer = optim.Adam(model.parameters(), lr=INIT_LR)
    scheduler = optim.lr_scheduler.CosineAnnealingLR(optimizer, N_EPOCHS)

    # TODO: add to pytest
    loss_fct = imed_losses.DiceLoss()

    load_lst, pred_lst, opt_lst, schedule_lst, init_lst, gen_lst = [], [], [], [], [], []
    for epoch in tqdm(range(1, N_EPOCHS + 1), desc="Training"):
        start_time = time.time()

        start_init = time.time()

        model.train()

        tot_init = time.time() - start_init
        init_lst.append(tot_init)

        num_steps = 0
        start_gen = 0
        for i, batch in enumerate(train_loader):
            if i > 0:
                tot_gen = time.time() - start_gen
                gen_lst.append(tot_gen)

            start_load = time.time()
            input_samples = imed_utils.cuda(batch["input"], cuda_available)
            gt_samples = imed_utils.cuda(batch["gt"], cuda_available, non_blocking=True)

            tot_load = time.time() - start_load
            load_lst.append(tot_load)

            start_pred = time.time()

            if 'film_layers' in model_params:
                preds = model(input_samples, [[0, 1]])
            else:
                preds = model(input_samples)
            tot_pred = time.time() - start_pred
            pred_lst.append(tot_pred)

            start_opt = time.time()
            loss = loss_fct(preds, gt_samples)

            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            if step_scheduler_batch:
                scheduler.step()

            num_steps += 1
            tot_opt = time.time() - start_opt
            opt_lst.append(tot_opt)

            start_gen = time.time()

        start_schedule = time.time()
        if not step_scheduler_batch:
            scheduler.step()
        tot_schedule = time.time() - start_schedule
        schedule_lst.append(tot_schedule)

        end_time = time.time()
        total_time = end_time - start_time
        tqdm.write("Epoch {} took {:.2f} seconds.".format(epoch, total_time))

    logger.info(f"Mean SD init {np.mean(init_lst)} -- {np.std(init_lst)}")
    logger.info(f"Mean SD load {np.mean(load_lst)} -- {np.std(load_lst)}")
    logger.info(f"Mean SD pred {np.mean(pred_lst)} -- {np.std(pred_lst)}")
    logger.info(f"Mean SDopt {np.mean(opt_lst)} --  {np.std(opt_lst)}")
    logger.info(f"Mean SD gen {np.mean(gen_lst)} -- {np.std(gen_lst)}")
    logger.info(f"Mean SD scheduler {np.mean(schedule_lst)} -- {np.std(schedule_lst)}")
Example #19
def run_command(context, n_gif=0, thr_increment=None, resume_training=False):
    """Run main command.

    This function is central in the ivadomed project as training / testing / evaluation commands are run via this
    function. All the process parameters are defined in the config.

    Args:
        context (dict): Dictionary containing all parameters that are needed for a given process. See
            :doc:`configuration_file` for more details.
        n_gif (int): Generates a GIF during training if larger than zero, one frame per epoch for a given slice. The
            parameter indicates the number of 2D slices used to generate GIFs, one GIF per slice. A GIF shows
            predictions of a given slice from the validation sub-dataset. They are saved within the log directory.
        thr_increment (float): A threshold analysis is performed at the end of the training using the trained model and
            the training + validation sub-dataset to find the optimal binarization threshold. The specified value
            indicates the increment between 0 and 1 used during the ROC analysis (e.g. 0.1).
        resume_training (bool): Load a saved model ("checkpoint.pth.tar" in the log_directory) to resume
            training. This training state is saved every time a new best model is saved in the log
            directory.

    Returns:
        float or pandas.DataFrame or None:
        If "train" command: Returns floats: best loss score for both training and validation.
        If "test" command: Returns a pandas DataFrame of metrics computed for each subject of the
            testing sub-dataset, along with the prediction metrics before evaluation.
        If "segment" command: No return value.
    """
    command = copy.deepcopy(context["command"])
    log_directory = copy.deepcopy(context["log_directory"])
    if not os.path.isdir(log_directory):
        print('Creating log directory: {}'.format(log_directory))
        os.makedirs(log_directory)
    else:
        print('Log directory already exists: {}'.format(log_directory))

    # Define device
    cuda_available, device = imed_utils.define_device(context['gpu'])

    # Get subject lists
    train_lst, valid_lst, test_lst = imed_loader_utils.get_subdatasets_subjects_list(
        context["split_dataset"], context['loader_parameters']['bids_path'],
        log_directory)

    # Loader params
    loader_params = copy.deepcopy(context["loader_parameters"])
    if command == "train":
        loader_params["contrast_params"]["contrast_lst"] = loader_params[
            "contrast_params"]["training_validation"]
    else:
        loader_params["contrast_params"]["contrast_lst"] = loader_params[
            "contrast_params"]["testing"]
    if "FiLMedUnet" in context and context["FiLMedUnet"]["applied"]:
        loader_params.update(
            {"metadata_type": context["FiLMedUnet"]["metadata"]})

    # Get transforms for each subdataset
    transform_train_params, transform_valid_params, transform_test_params = \
        imed_transforms.get_subdatasets_transforms(context["transformation"])

    # MODEL PARAMETERS
    model_params = copy.deepcopy(context["default_model"])
    model_params["folder_name"] = copy.deepcopy(context["model_name"])
    model_context_list = [
        model_name for model_name in MODEL_LIST
        if model_name in context and context[model_name]["applied"]
    ]
    if len(model_context_list) == 1:
        model_params["name"] = model_context_list[0]
        model_params.update(context[model_context_list[0]])
    elif 'Modified3DUNet' in model_context_list and 'FiLMedUnet' in model_context_list and len(
            model_context_list) == 2:
        model_params["name"] = 'Modified3DUNet'
        for i in range(len(model_context_list)):
            model_params.update(context[model_context_list[i]])
    elif len(model_context_list) > 1:
        print(
            'ERROR: Several models are selected in the configuration file: {}. '
            'Please select only one (i.e. only one where: "applied": true).'.
            format(model_context_list))
        exit()

    model_params['is_2d'] = False if "Modified3DUNet" in model_params[
        'name'] else model_params['is_2d']
    # Get in_channel from contrast_lst
    if loader_params["multichannel"]:
        model_params["in_channel"] = len(
            loader_params["contrast_params"]["contrast_lst"])
    else:
        model_params["in_channel"] = 1
    # Get out_channel from target_suffix
    model_params["out_channel"] = len(loader_params["target_suffix"])
    # If multi-class output, then add background class
    if model_params["out_channel"] > 1:
        model_params.update({"out_channel": model_params["out_channel"] + 1})
    # Display for spec' check
    imed_utils.display_selected_model_spec(params=model_params)
    # Update loader params
    if 'object_detection_params' in context:
        object_detection_params = context['object_detection_params']
        object_detection_params.update({
            "gpu":
            context['gpu'],
            "log_directory":
            context['log_directory']
        })
        loader_params.update(
            {"object_detection_params": object_detection_params})

    loader_params.update({"model_params": model_params})

    # TESTING PARAMS
    # Aleatoric uncertainty
    if context['uncertainty'][
            'aleatoric'] and context['uncertainty']['n_it'] > 0:
        transformation_dict = transform_train_params
    else:
        transformation_dict = transform_test_params
    undo_transforms = imed_transforms.UndoCompose(
        imed_transforms.Compose(transformation_dict, requires_undo=True))
    testing_params = copy.deepcopy(context["training_parameters"])
    testing_params.update({'uncertainty': context["uncertainty"]})
    testing_params.update({
        'target_suffix': loader_params["target_suffix"],
        'undo_transforms': undo_transforms,
        'slice_axis': loader_params['slice_axis']
    })
    if command == "train":
        imed_utils.display_selected_transfoms(transform_train_params,
                                              dataset_type=["training"])
        imed_utils.display_selected_transfoms(transform_valid_params,
                                              dataset_type=["validation"])
    elif command == "test":
        imed_utils.display_selected_transfoms(transformation_dict,
                                              dataset_type=["testing"])

    if command == 'train':
        # LOAD DATASET
        # Get Validation dataset
        ds_valid = imed_loader.load_dataset(**{
            **loader_params,
            **{
                'data_list': valid_lst,
                'transforms_params': transform_valid_params,
                'dataset_type': 'validation'
            }
        },
                                            device=device,
                                            cuda_available=cuda_available)
        # Get Training dataset
        ds_train = imed_loader.load_dataset(**{
            **loader_params,
            **{
                'data_list': train_lst,
                'transforms_params': transform_train_params,
                'dataset_type': 'training'
            }
        },
                                            device=device,
                                            cuda_available=cuda_available)

        metric_fns = imed_metrics.get_metric_fns(ds_train.task)

        # If FiLM, normalize data
        if 'film_layers' in model_params and any(model_params['film_layers']):
            # Normalize metadata before sending to the FiLM network
            results = imed_film.get_film_metadata_models(
                ds_train=ds_train,
                metadata_type=model_params['metadata'],
                debugging=context["debugging"])
            ds_train, train_onehotencoder, metadata_clustering_models = results
            ds_valid = imed_film.normalize_metadata(
                ds_valid, metadata_clustering_models, context["debugging"],
                model_params['metadata'])
            model_params.update({
                "film_onehotencoder":
                train_onehotencoder,
                "n_metadata":
                len([ll for l in train_onehotencoder.categories_ for ll in l])
            })
            joblib.dump(metadata_clustering_models,
                        "./" + log_directory + "/clustering_models.joblib")
            joblib.dump(train_onehotencoder,
                        "./" + log_directory + "/one_hot_encoder.joblib")

        # Model directory
        path_model = os.path.join(log_directory, context["model_name"])
        if not os.path.isdir(path_model):
            print('Creating model directory: {}'.format(path_model))
            os.makedirs(path_model)
            if 'film_layers' in model_params and any(
                    model_params['film_layers']):
                joblib.dump(train_onehotencoder,
                            os.path.join(path_model, "one_hot_encoder.joblib"))
                if 'metadata_dict' in ds_train[0]['input_metadata'][0]:
                    metadata_dict = ds_train[0]['input_metadata'][0][
                        'metadata_dict']
                    joblib.dump(
                        metadata_dict,
                        os.path.join(path_model, "metadata_dict.joblib"))

        else:
            print('Model directory already exists: {}'.format(path_model))

        # RUN TRAINING
        best_training_dice, best_training_loss, best_validation_dice, best_validation_loss = imed_training.train(
            model_params=model_params,
            dataset_train=ds_train,
            dataset_val=ds_valid,
            training_params=context["training_parameters"],
            log_directory=log_directory,
            device=device,
            cuda_available=cuda_available,
            metric_fns=metric_fns,
            n_gif=n_gif,
            resume_training=resume_training,
            debugging=context["debugging"])

    if thr_increment:
        # LOAD DATASET
        if command != 'train':  # If command == train, then ds_valid is already loaded
            # Get Validation dataset
            ds_valid = imed_loader.load_dataset(**{
                **loader_params,
                **{
                    'data_list': valid_lst,
                    'transforms_params': transform_valid_params,
                    'dataset_type': 'validation'
                }
            },
                                                device=device,
                                                cuda_available=cuda_available)
        # Get Training dataset with no Data Augmentation
        ds_train = imed_loader.load_dataset(**{
            **loader_params,
            **{
                'data_list': train_lst,
                'transforms_params': transform_valid_params,
                'dataset_type': 'training'
            }
        },
                                            device=device,
                                            cuda_available=cuda_available)

        # Choice of optimisation metric
        metric = "recall_specificity" if model_params[
            "name"] in imed_utils.CLASSIFIER_LIST else "dice"
        # Model path
        model_path = os.path.join(log_directory, "best_model.pt")
        # Run analysis
        thr = imed_testing.threshold_analysis(model_path=model_path,
                                              ds_lst=[ds_train, ds_valid],
                                              model_params=model_params,
                                              testing_params=testing_params,
                                              metric=metric,
                                              increment=thr_increment,
                                              fname_out=os.path.join(
                                                  log_directory, "roc.png"),
                                              cuda_available=cuda_available)

        # Update threshold in config file
        context["postprocessing"]["binarize_prediction"] = {"thr": thr}

    if command == 'train':
        # Save config file within log_directory and log_directory/model_name
        # Done after threshold_analysis so that the tuned threshold propagates to the saved config files
        with open(os.path.join(log_directory, "config_file.json"), 'w') as fp:
            json.dump(context, fp, indent=4)
        with open(
                os.path.join(log_directory, context["model_name"],
                             context["model_name"] + ".json"), 'w') as fp:
            json.dump(context, fp, indent=4)

        return best_training_dice, best_training_loss, best_validation_dice, best_validation_loss

    if command == 'test':
        # LOAD DATASET
        ds_test = imed_loader.load_dataset(
            **{**loader_params,
               'data_list': test_lst,
               'transforms_params': transformation_dict,
               'dataset_type': 'testing',
               'requires_undo': True},
            device=device,
            cuda_available=cuda_available)

        metric_fns = imed_metrics.get_metric_fns(ds_test.task)

        if 'film_layers' in model_params and any(model_params['film_layers']):
            clustering_path = os.path.join(log_directory,
                                           "clustering_models.joblib")
            metadata_clustering_models = joblib.load(clustering_path)
            ohe_path = os.path.join(log_directory, "one_hot_encoder.joblib")
            one_hot_encoder = joblib.load(ohe_path)
            ds_test = imed_film.normalize_metadata(ds_test,
                                                   metadata_clustering_models,
                                                   context["debugging"],
                                                   model_params['metadata'])
            model_params.update({
                "film_onehotencoder": one_hot_encoder,
                "n_metadata": len([c for cats in one_hot_encoder.categories_
                                   for c in cats])
            })
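            # "n_metadata" counts every one-hot category across all encoded
            # metadata fields (assuming a scikit-learn-style encoder): e.g. with
            # categories_ == [array(['T1w', 'T2w']), array(['ax', 'sag'])], the
            # flattened length is 4.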

        # RUN INFERENCE
        pred_metrics = imed_testing.test(
            model_params=model_params,
            dataset_test=ds_test,
            testing_params=testing_params,
            log_directory=log_directory,
            device=device,
            cuda_available=cuda_available,
            metric_fns=metric_fns,
            postprocessing=context['postprocessing'])

        # RUN EVALUATION
        df_results = imed_evaluation.evaluate(
            bids_path=loader_params['bids_path'],
            log_directory=log_directory,
            target_suffix=loader_params["target_suffix"],
            eval_params=context["evaluation_parameters"])
        return df_results, pred_metrics

    if command == 'segment':
        bids_ds = bids.BIDS(context["loader_parameters"]["bids_path"])
        df = bids_ds.participants.content
        subj_lst = df['participant_id'].tolist()
        bids_subjects = [
            s for s in bids_ds.get_subjects()
            if s.record["subject_id"] in subj_lst
        ]

        # Add postprocessing to packaged model
        path_model = os.path.join(context['log_directory'],
                                  context['model_name'])
        path_model_config = os.path.join(path_model,
                                         context['model_name'] + ".json")
        model_config = imed_config_manager.load_json(path_model_config)
        model_config['postprocessing'] = context['postprocessing']
        with open(path_model_config, 'w') as fp:
            json.dump(model_config, fp, indent=4)

        options = None
        for subject in bids_subjects:
            fname_img = subject.record["absolute_path"]
            if 'film_layers' in model_params and any(
                    model_params['film_layers']) and model_params['metadata']:
                subj_id = subject.record['subject_id']
                metadata = df[df['participant_id'] == subj_id][
                    model_params['metadata']].values[0]
                options = {'metadata': metadata}
            pred = imed_inference.segment_volume(path_model,
                                                 fname_image=fname_img,
                                                 gpu_number=context['gpu'],
                                                 options=options)
            pred_path = os.path.join(context['log_directory'], "pred_masks")
            if not os.path.exists(pred_path):
                os.makedirs(pred_path)
            filename = (subject.record['subject_id'] + "_" +
                        subject.record['modality'] + "_pred.nii.gz")
            nib.save(pred, os.path.join(pred_path, filename))
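
The threshold analysis used in this pipeline boils down to a brute-force sweep: binarize the soft predictions at each candidate threshold, score them, and keep the winner. Below is a minimal, self-contained sketch of that idea; it is an illustration only, not ivadomed's threshold_analysis, and the sweep_binarization_threshold helper, its Dice computation, and the random inputs are all hypothetical stand-ins.

import numpy as np

def sweep_binarization_threshold(soft_pred, gt, increment=0.1):
    """Return the threshold in (0, 1) that maximizes Dice on soft predictions."""
    best_thr, best_dice = 0.5, -1.0
    for thr in np.arange(increment, 1.0, increment):
        binarized = (soft_pred >= thr).astype(np.uint8)
        intersection = np.logical_and(binarized, gt).sum()
        dice = 2.0 * intersection / (binarized.sum() + gt.sum() + 1e-8)
        if dice > best_dice:
            best_thr, best_dice = thr, dice
    return best_thr

# Hypothetical usage on random data:
rng = np.random.default_rng(0)
soft_pred = rng.random((96, 96))
gt = (rng.random((96, 96)) > 0.5).astype(np.uint8)
print(sweep_binarization_threshold(soft_pred, gt, increment=0.05))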
Exemple #20
0
def test_inference(transforms_dict, test_lst, target_lst, roi_params, testing_params):
    cuda_available, device = imed_utils.define_device(GPU_ID)

    model_params = {"name": "Unet", "is_2d": True}
    loader_params = {
        "transforms_params": transforms_dict,
        "data_list": test_lst,
        "dataset_type": "testing",
        "requires_undo": True,
        "contrast_params": {"contrast_lst": ['T2w'], "balance": {}},
        "path_data": [__data_testing_dir__],
        "target_suffix": target_lst,
        "roi_params": roi_params,
        "slice_filter_params": {
            "filter_empty_mask": False,
            "filter_empty_input": True
        },
        "slice_axis": SLICE_AXIS,
        "multichannel": False
    }
    loader_params.update({"model_params": model_params})

    # Get Testing dataset
    ds_test = imed_loader.load_dataset(**loader_params)
    test_loader = DataLoader(ds_test, batch_size=BATCH_SIZE,
                             shuffle=False, pin_memory=True,
                             collate_fn=imed_loader_utils.imed_collate,
                             num_workers=0)
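    # imed_collate is presumably needed because each sample is a dict mixing
    # image tensors with per-slice metadata that the default collate cannot
    # batch; a rough sketch of such a dict-aware collate appears after this
    # example.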

    # Undo transform
    val_undo_transform = imed_transforms.UndoCompose(imed_transforms.Compose(transforms_dict))

    # Update testing_params
    testing_params.update({
        "slice_axis": loader_params["slice_axis"],
        "target_suffix": loader_params["target_suffix"],
        "undo_transforms": val_undo_transform
    })

    # Model
    model = imed_models.Unet()

    if cuda_available:
        model.cuda()
    model.eval()

    metric_fns = [imed_metrics.dice_score,
                  imed_metrics.hausdorff_score,
                  imed_metrics.precision_score,
                  imed_metrics.recall_score,
                  imed_metrics.specificity_score,
                  imed_metrics.intersection_over_union,
                  imed_metrics.accuracy_score]

    metric_mgr = imed_metrics.MetricManager(metric_fns)

    if not os.path.isdir(__output_dir__):
        os.makedirs(__output_dir__)

    preds_npy, gt_npy = imed_testing.run_inference(test_loader=test_loader,
                                                   model=model,
                                                   model_params=model_params,
                                                   testing_params=testing_params,
                                                   ofolder=__output_dir__,
                                                   cuda_available=cuda_available)

    metric_mgr(preds_npy, gt_npy)
    metrics_dict = metric_mgr.get_results()
    metric_mgr.reset()
    print(metrics_dict)
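
For reference, here is a rough sketch of what a dict-aware collate function such as the imed_collate used above has to do: stack the tensor fields into a batch and pass everything else through as plain lists. This is a guess at the general shape under that assumption, not ivadomed's actual implementation; dict_collate and the toy batch are hypothetical.

import torch

def dict_collate(batch):
    """Batch a list of dict samples: stack tensors, keep metadata as lists."""
    out = {}
    for key in batch[0]:
        values = [sample[key] for sample in batch]
        if isinstance(values[0], torch.Tensor):
            out[key] = torch.stack(values, dim=0)
        else:
            out[key] = values  # e.g. per-slice metadata dicts
    return out

# Hypothetical usage:
batch = [{'input': torch.zeros(1, 8, 8), 'meta': {'slice': i}} for i in range(4)]
collated = dict_collate(batch)
print(collated['input'].shape)  # torch.Size([4, 1, 8, 8])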