Example #1
def get_data_generators(opt, test_fold):
    folds = set(range(1, opt.nFolds + 1))
    exclude_train = {test_fold}          # the training set leaves out the test fold
    exclude_val = folds - exclude_train  # the validation set keeps only the test fold

    train_set = BCDatasets(opt.data,
                           opt.dataset,
                           opt.sr,
                           exclude_train,
                           transform=get_train_transform(opt.inputLength),
                           mix=opt.BC)
    val_set = BCDatasets(opt.data,
                         opt.dataset,
                         opt.sr,
                         exclude_val,
                         transform=get_test_transform(opt.inputLength))

    params = {'batch_size': opt.batchSize, 'shuffle': True, 'num_workers': 1}
    training_generator = data.DataLoader(train_set, **params)

    params = {'batch_size': 1, 'shuffle': True, 'num_workers': 1}
    validation_generator = data.DataLoader(val_set, **params)

    if use_cuda(opt):
        training_generator = cudify(training_generator)
        validation_generator = cudify(validation_generator)

    return training_generator, validation_generator
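The helpers `use_cuda` and `cudify` are not shown in this example. A minimal sketch of what they might look like (an assumption, not the repository's code): an availability check plus a re-iterable wrapper that moves each batch to the GPU.

import torch

def use_cuda(opt):
    # Hypothetical: honor an opt flag if present, otherwise rely on availability.
    return torch.cuda.is_available() and not getattr(opt, 'noCuda', False)

class cudify:
    # Hypothetical wrapper: re-iterable across epochs, moves each
    # (inputs, targets) batch to the GPU as it is yielded.
    def __init__(self, loader):
        self.loader = loader

    def __len__(self):
        return len(self.loader)

    def __iter__(self):
        for inputs, targets in self.loader:
            yield inputs.cuda(), targets.cuda()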
Example #2
def get_transform(augmentation, landmarks_path):
    import datasets
    import torchio as tio
    if augmentation:
        # A training augmentation was requested: delegate to the dataset module
        return datasets.get_train_transform(landmarks_path)
    # Otherwise, compose the test-time preprocessing with a default spatial
    # augmentation: random flip, then either an affine or an elastic deformation
    preprocess = datasets.get_test_transform(landmarks_path)
    augment = tio.Compose((
        tio.RandomFlip(),
        tio.OneOf({
            tio.RandomAffine(): 0.8,
            tio.RandomElasticDeformation(): 0.2,
        }),
    ))
    return tio.Compose((preprocess, augment))
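A hypothetical usage sketch (the file paths are placeholders): torchio transforms are callables that take a Subject, so the returned transform can be applied directly.

import torchio as tio

transform = get_transform(augmentation=False, landmarks_path='landmarks.npy')
subject = tio.Subject(image=tio.ScalarImage('t1.nii.gz'))
transformed = transform(subject)  # preprocessed (and possibly augmented) copy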
Example #3
def main(image_dir, label_dir, checkpoint_path, output_dir, landmarks_path,
         df_path, batch_size, num_workers, multi_gpu):
    import torch
    import torchio as tio
    from pathlib import Path
    import models
    import datasets
    import engine
    import utils

    fps = get_paths(image_dir)
    lfps = get_paths(label_dir)
    assert len(fps) == len(lfps), 'number of images and labels differ'
    # key must be 'image' as in get_test_transform
    subjects = [
        tio.Subject(image=tio.ScalarImage(fp), label=tio.LabelMap(lfp))
        for (fp, lfp) in zip(fps, lfps)
    ]
    transform = datasets.get_test_transform(landmarks_path)
    dataset = tio.SubjectsDataset(subjects, transform)
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    checkpoint = torch.load(checkpoint_path, map_location=device)
    model = models.get_unet().to(device)
    if multi_gpu:
        model = torch.nn.DataParallel(model)
        model.module.load_state_dict(checkpoint['model'])
    else:
        model.load_state_dict(checkpoint['model'])
    output_dir = Path(output_dir)
    model.eval()
    torch.set_grad_enabled(False)
    loader = torch.utils.data.DataLoader(dataset,
                                         batch_size=batch_size,
                                         num_workers=num_workers)
    output_dir.mkdir(parents=True, exist_ok=True)
    evaluator = engine.Evaluator()
    df = evaluator.infer(model, loader, output_dir)
    df.to_csv(df_path)
    med, iqr = 100 * utils.get_median_iqr(df.Dice)
    print(f'{med:.1f} ({iqr:.1f})')
    return 0
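`utils.get_median_iqr` is not shown; given that its result is multiplied by 100 and unpacked into two values, a plausible sketch (an assumption, not the repository's code):

import numpy as np

def get_median_iqr(values):
    # Return (median, interquartile range) as one array so it can be scaled at once.
    values = np.asarray(values)
    q1, q3 = np.percentile(values, (25, 75))
    return np.array([np.median(values), q3 - q1])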
Example #4
def main(input_path, checkpoint_path, output_dir, landmarks_path, batch_size, num_workers, resample):
    import torch
    from tqdm import tqdm
    import torchio as tio
    from pathlib import Path
    import models
    import datasets

    fps = get_paths(input_path)
    subjects = [tio.Subject(image=tio.ScalarImage(fp)) for fp in fps]  # key must be 'image' as in get_test_transform
    transform = tio.Compose((
        tio.ToCanonical(),
        datasets.get_test_transform(landmarks_path),
    ))
    if resample:
        transform = tio.Compose((
            tio.Resample(),  # resample to 1 mm isotropic spacing (the default)
            transform,
            # tio.CropOrPad((264, 268, 144)),  # for the BITE dataset?
        ))
    dataset = tio.SubjectsDataset(subjects, transform)
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    checkpoint = torch.load(checkpoint_path, map_location=device)
    model = models.get_unet().to(device)
    model.load_state_dict(checkpoint['model'])
    output_dir = Path(output_dir)
    model.eval()
    torch.set_grad_enabled(False)
    loader = torch.utils.data.DataLoader(dataset, batch_size=batch_size, num_workers=num_workers)
    output_dir.mkdir(exist_ok=True, parents=True)
    for batch in tqdm(loader):
        inputs = batch['image'][tio.DATA].float().to(device)
        # Foreground probabilities (channels after background), thresholded at 0.5
        seg = model(inputs).softmax(dim=1)[:, 1:].cpu() > 0.5
        for tensor, affine, path in zip(seg, batch['image'][tio.AFFINE], batch['image'][tio.PATH]):
            image = tio.LabelMap(tensor=tensor, affine=affine.numpy())
            path = Path(path)
            out_path = output_dir / path.name.replace('.nii', '_seg_cnn.nii')
            image.save(out_path)
    return 0
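`get_paths` appears in Examples #3 through #5 but is not defined in any of them. A minimal sketch, assuming it gathers NIfTI files from a file or directory path:

from pathlib import Path

def get_paths(input_path):
    # Hypothetical: accept either a single image or a directory of NIfTI images.
    input_path = Path(input_path)
    if input_path.is_file():
        return [input_path]
    return sorted(input_path.glob('*.nii*'))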
Example #5
def main(
    input_path,
    checkpoint_path,
    output_dir,
    landmarks_path,
    num_iterations,
    csv_path,
    num_workers,
    gpu,
    threshold,
    interpolation,
):
    import torch
    import pandas as pd
    import numpy as np
    import torchio as tio
    from pathlib import Path
    from tqdm import tqdm, trange

    import utils
    import models
    import datasets

    device = torch.device(
        'cuda' if torch.cuda.is_available() and gpu else 'cpu')
    checkpoint = torch.load(checkpoint_path, map_location=device)
    model = models.get_unet().to(device)
    model.load_state_dict(checkpoint['model'])
    output_dir = Path(output_dir)
    model.eval()
    utils.enable_dropout(model)

    torch.set_grad_enabled(False)

    fps = get_paths(input_path)
    mean_dir = output_dir / 'mean'
    std_dir = output_dir / 'std'
    entropy_dir = output_dir / 'entropy'
    mean_dir.mkdir(parents=True, exist_ok=True)
    std_dir.mkdir(parents=True, exist_ok=True)
    entropy_dir.mkdir(parents=True, exist_ok=True)

    records = []
    progress = tqdm(fps, unit='subject')
    for fp in progress:
        subject_id = fp.name[:4]  # the first four characters of the file name
        progress.set_description(subject_id)
        image = tio.ScalarImage(fp)
        subject = tio.Subject(image=image)  # key must be 'image' as in get_test_transform
        preprocess = datasets.get_test_transform(landmarks_path)
        preprocessed = preprocess(subject)
        inputs = preprocessed.image.data.float()[np.newaxis].to(device)
        all_results = []
        for _ in trange(num_iterations, leave=False):
            # Each forward pass samples a new dropout mask (Monte Carlo dropout)
            with torch.cuda.amp.autocast():
                segs = model(inputs).softmax(dim=1)[0, 1:]
            all_results.append(segs.cpu())
        result = torch.stack(all_results)

        volumes = result.sum(dim=(-3, -2, -1)).numpy()  # soft volume per iteration, in voxels
        mean_volumes = volumes.mean()
        std_volumes = volumes.std()
        volume_variation_coefficient = std_volumes / mean_volumes
        q1, q3 = np.percentile(volumes, (25, 75))
        quartile_coefficient_of_dispersion = (q3 - q1) / (q3 + q1)

        records.append(
            dict(
                Subject=subject_id,
                VolumeMean=mean_volumes,
                VolumeSTD=std_volumes,
                VVC=volume_variation_coefficient,
                Q1=q1,
                Q3=q3,
                QCD=quartile_coefficient_of_dispersion,
            ))

        # The last recorded transform is the crop; invert it to pad the
        # outputs back to the preprocessed image size
        crop: tio.Crop = preprocessed.history[-1]
        pad = crop.inverse()

        assert np.count_nonzero(result.numpy() < 0) == 0, 'neg values found in result'
        mean = result.mean(dim=0)
        assert np.count_nonzero(mean.numpy() < 0) == 0, 'neg values found in mean'
        std = result.std(dim=0)
        # entropy = utils.get_entropy(result)

        mean_image = tio.ScalarImage(tensor=mean,
                                     affine=preprocessed.image.affine)
        std_image = tio.ScalarImage(tensor=std,
                                    affine=preprocessed.image.affine)
        # entropy_image = tio.ScalarImage(tensor=entropy, affine=preprocessed.image.affine)

        mean_path = mean_dir / fp.name.replace('.nii', '_mean.nii')
        std_path = std_dir / fp.name.replace('.nii', '_std.nii')
        # entropy_path = entropy_dir / fp.name.replace('.nii', '_entropy.nii')

        pad(mean_image).save(mean_path)
        pad(std_image).save(std_path)
        # pad(entropy_image).save(entropy_path)

        # Rewrite the CSV every iteration so partial results are available while the script runs
        df = pd.DataFrame.from_records(records)
        df.to_csv(csv_path)

    return 0
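`utils.enable_dropout` keeps dropout layers active while the rest of the model stays in eval mode, which is what makes the repeated forward passes above behave as Monte Carlo dropout sampling. A common implementation of this pattern (an assumption, not the repository's code):

import torch

def enable_dropout(model):
    # Put only the dropout modules back into training mode so they keep sampling.
    for module in model.modules():
        if module.__class__.__name__.startswith('Dropout'):
            module.train()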