def test():
    """Smoke-test fragment: builds a BratsDataset/DataLoader over the BraTS
    2020 training CSV, then sets up model loading and uncertainty config.

    NOTE(review): this function is truncated by a paste artifact below —
    the final loop body is cut off mid-iteration.
    """

    # Hard-coded, machine-specific path to the BraTS 2020 index CSV.
    csv = "/Users/lauramora/Documents/MASTER/TFM/Data/2020/train/no_patch/brats20_data.csv"
    data, data_test = dataset.read_brats(csv)

    # Enable all four MRI modalities (FLAIR, T1, T2, T1ce).
    modalities_to_use = {
        BratsDataset.flair_idx: True,
        BratsDataset.t1_idx: True,
        BratsDataset.t2_idx: True,
        BratsDataset.t1ce_idx: True
    }
    transforms = T.Compose([T.ToTensor()])
    # The patch-sampling strategy is resolved dynamically by module path.
    sampling_method = importlib.import_module(
        "src.dataset.patching.random_tumor_distribution")
    patch_size = (128, 128, 128)
    n_patches = 10

    # Replicate the patient list so one epoch draws n_patches patches per
    # patient (each __getitem__ samples a fresh random patch).
    data = data * n_patches
    train_dataset = BratsDataset(data, modalities_to_use, sampling_method,
                                 patch_size, transforms)
    train_loader = DataLoader(dataset=train_dataset,
                              batch_size=16,
                              shuffle=True,
                              num_workers=1)

    # NOTE(review): unpacking each batch as (idx, data, labels) assumes the
    # dataset's __getitem__ returns a 3-tuple — confirm against BratsDataset.
    for idx, b_data, b_labels in train_loader:
        print(b_data)
    model_config = config.get_model_config()
    dataset_config = config.get_dataset_config()
    basic_config = config.get_basic_config()
    unc_config = config.get_uncertainty_config()
    device = torch.device(
        "cuda") if torch.cuda.is_available() else torch.device("cpu")

    task = "segmentation_task"
    compute_metrics = False
    flag_post_process = True

    model, model_path = load_network(device, model_config, dataset_config,
                                     model_config["network"])

    setx = "train"
    # Evaluate over the train and test patient lists combined.
    data, data_test = dataset.read_brats(dataset_config.get("train_csv"))
    data.extend(data_test)

    # Last dotted component of the module path, e.g. "random_tumor_distribution".
    sampling = dataset_config.get("sampling_method").split(".")[-1]

    uncertainty_flag = basic_config.getboolean("uncertainty_flag")
    uncertainty_type = unc_config.get("uncertainty_type")
    n_iterations = unc_config.getint("n_iterations")
    use_dropout = unc_config.getboolean("use_dropout")

    for idx in range(0, len(data)):

        patch_size = data[idx].size

        images = data[idx].load_mri_volumes(normalize=True)
        brain_mask = data[idx].get_brain_mask()
# Exemplo n.º 3 (extraction artifact — commented out so it is not a syntax error)
# 0
from matplotlib import pyplot as plt
import numpy as np


def matplotlib_imshow(images, normalized=False):
    """Save a grid visualization of a batch of image tensors to "batch.png".

    Args:
        images: batch of image tensors accepted by
            ``torchvision.utils.make_grid`` (N, C, H, W).
        normalized: currently unused — TODO(review): either un-normalize
            the images when True or remove the parameter after checking
            callers.
    """
    img = torchvision.utils.make_grid(images, padding=10)
    # detach()/cpu() so tensors on GPU or requiring grad convert cleanly;
    # plain .numpy() raises for both cases.
    npimg = img.detach().cpu().numpy()
    # make_grid returns (C, H, W); matplotlib expects (H, W, C).
    trans_im = np.transpose(npimg, (1, 2, 0))
    plt.imshow(trans_im, cmap="gray")
    plt.savefig("batch")


# Module-level smoke test: build a BratsDataset over the full training CSV.
# NOTE(review): path is machine-specific.
csv = "/Users/lauramora/Documents/MASTER/TFM/Data/2020/train/no_patch/brats20_data.csv"
data, data_test = dataset.read_brats(csv)

# Enable all four MRI modalities (FLAIR, T1, T2, T1ce).
modalities_to_use = {
    BratsDataset.flair_idx: True,
    BratsDataset.t1_idx: True,
    BratsDataset.t2_idx: True,
    BratsDataset.t1ce_idx: True
}
sampling_method = importlib.import_module(
    "src.dataset.patching.random_tumor_distribution")
transforms = T.Compose([T.ToTensor()])

# Each patient is replicated 100 times so an epoch samples 100 random
# patches per patient.
data_train = data * 100
batch_size = 4
train_dataset = BratsDataset(data_train, modalities_to_use, sampling_method,
                             (128, 128, 128), transforms)
    # Paths for this split's CSV and the per-model checkpoint directories.
    task = f"segmentation_task/{setx}"
    dataset_csv_path = f"{gen_path}/datasets/{setx}/no_patch/{csv}"

    # Timestamp-named checkpoint directories that form the ensemble.
    models = [
        "model_1598550861", "model_1598639885", "model_1598640035",
        "model_1598640005"
    ]
    check_path = f"{gen_path}results/checkpoints/"
    models = list(map(lambda x: os.path.join(check_path, x, task), models))

    output_dir = os.path.join(check_path, f"{task}/ensemble_predictions/")
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)

    data, _ = dataset.read_brats(dataset_csv_path, lgg_only=False)

    for patient in tqdm(data, total=len(data), desc="Ensemble prediction"):
        patient_name = patient.patient

        # Majority vote across the per-model segmentation maps, restricted
        # to the patient's brain mask.
        seg_maps = read_preds_from_models(models, f"{patient_name}.nii.gz")
        ensemble_map = majority_voting(seg_maps, patient.get_brain_mask())

        # Persist the ensembled segmentation as a NIfTI volume with the
        # patient's original affine.
        output_path_with_name = os.path.join(output_dir,
                                             f"{patient_name}.nii.gz")
        save_segmask_as_nifi_volume(ensemble_map, patient.get_affine(),
                                    output_path_with_name)

        # NOTE(review): fragment is truncated here by a paste artifact.
        if compute_metrics:
            patient_path = os.path.join(patient.data_path, patient.patch_name,
                                        patient.seg)
# Exemplo n.º 5 (extraction artifact — commented out so it is not a syntax error)
# 0
    basic_config = config.get_basic_config()

    device = torch.device(
        "cuda") if torch.cuda.is_available() else torch.device("cpu")
    # Last dotted component of the sampling module path,
    # e.g. "random_tumor_distribution".
    sampling = dataset_config.get("sampling_method").split(".")[-1]

    models_gen_path = model_config.get("model_path")
    task = "ensemble_segmentation"
    compute_metrics = False

    # Load the four ensemble members (named by training timestamp).
    model_vnet = load_model_1598550861(models_gen_path)
    model_2 = load_model_1598639885(models_gen_path)
    model_3 = load_model_1598640035(models_gen_path)
    model_4 = load_model_1598640005(models_gen_path)

    data, _ = dataset.read_brats(dataset_config.get("val_csv"))

    for idx in range(0, len(data)):
        results = {}

        images = data[idx].load_mri_volumes(normalize=True)

        # Crop the volume down to the brain region before prediction.
        x_1, x_2, y_1, y_2, z_1, z_2, images, brain_mask, patch_size = crop_no_patch(
            data[idx].size, images, data[idx].get_brain_mask(), sampling)

        # NOTE(review): model_4's prediction is missing from this visible
        # fragment — presumably it followed below the paste cut.
        _, prediction_four_channels_1 = predict.predict(
            model_vnet, images, device, False)
        _, prediction_four_channels_2 = predict.predict(
            model_2, images, device, False)
        _, prediction_four_channels_3 = predict.predict(
            model_3, images, device, False)
# Exemplo n.º 6 (extraction artifact — commented out so it is not a syntax error)
# 0
                    # Drop patients whose patch pool is exhausted so they
                    # are not sampled again.
                    del self.patches_by_patient[patient]

            yield batch_indices

    def __len__(self):
        # Number of batches: ceiling division of dataset size by batch size.
        return (len(self.dataset_indices) + self.batch_size -
                1) // self.batch_size


if __name__ == "__main__":
    from src.dataset.utils import dataset
    from torchvision import transforms as T
    import importlib

    # Manual check of BratsPatchSampler over a small 40-patient subset.
    data, _ = dataset.read_brats(
        "/Users/lauramora/Documents/MASTER/TFM/Data/2020/train/random_tumor_distribution/brats20_data.csv"
    )
    data_train = data[:40]
    data_val = data[:40]
    modalities_to_use = {
        BratsDataset.flair_idx: True,
        BratsDataset.t1_idx: True,
        BratsDataset.t2_idx: True,
        BratsDataset.t1ce_idx: True
    }
    sampling_method = importlib.import_module(
        "src.dataset.patching.random_tumor_distribution")

    # Renamed local from "dataset" to "train_dataset": the original
    # assignment shadowed the `dataset` module imported above.
    train_dataset = BratsDataset(data_train, modalities_to_use,
                                 sampling_method, (64, 64, 64),
                                 T.Compose([T.ToTensor()]))
    sampler = BratsPatchSampler(train_dataset, n_patients=2, n_samples=3)
patch_size = config.patch_size
tensorboard_logdir = basic_config.get("tensorboard_logs")
checkpoint_path = model_config.get("checkpoint")
batch_size = dataset_config.getint("batch_size")
n_patches = dataset_config.getint("n_patches")
n_classes = dataset_config.getint("classes")
loss = model_config.get("loss")

device = torch.device("cuda") if torch.cuda.is_available() else torch.device(
    "cpu")
logger.info(f"Device: {device}")

######## DATASET
logger.info("Creating Dataset...")

data, _ = dataset.read_brats(dataset_config.get("train_csv"),
                             lgg_only=dataset_config.getboolean("lgg_only"))
# Patient-level split first, then replicate each patient n_patches times so
# one epoch samples n_patches random patches per patient.
data_train, data_val = train_val_split(data, val_size=0.2)
data_train = data_train * n_patches
data_val = data_val * n_patches

n_modalities = dataset_config.getint("n_modalities")  # like color channels
sampling_method = importlib.import_module(
    dataset_config.get("sampling_method"))

# Intensity + spatial augmentations applied on-the-fly to every patch.
transform = transforms.Compose([
    color_augmentations.RandomIntensityShift(),
    color_augmentations.RandomIntensityScale(),
    spatial_augmentations.RandomMirrorFlip(p=0.5),
    spatial_augmentations.RandomRotation90(p=0.5)
])
# Exemplo n.º 8 (extraction artifact — commented out so it is not a syntax error)
# 0

def unnorm(data, epsilon=1e-8):
    """Invert a zero-mean/unit-std normalization using the statistics of the
    non-zero (foreground) voxels of *data* itself.

    Background voxels (exactly 0) stay at 0 in the output.

    Args:
        data: numeric array (e.g. numpy array) of normalized intensities.
        epsilon: small constant added to the std to avoid scaling by zero.

    Returns:
        New array with ``data * std + mean`` applied, background re-zeroed.
        If *data* has no positive voxels, a copy is returned unchanged
        instead of producing NaNs from the empty mean/std.
    """
    non_zero = data[data > 0.0]
    if len(non_zero) == 0:
        # All background: mean()/std() of an empty slice would be NaN.
        return data * 1  # cheap copy that works for numpy and torch alike
    mean = non_zero.mean()
    std = non_zero.std() + epsilon
    out = data * std + mean
    out[data == 0] = 0
    return out


# Smoke test for binary-distribution patch sampling on the test split.
# NOTE(review): path is machine-specific.
dataset_path = "/Users/lauramora/Documents/MASTER/TFM/Data/2020/train/no_patch"
train_csv = os.path.join(dataset_path, "brats20_data.csv")

print("Loading dataset")
data, data_test = read_brats(train_csv)
# NOTE(review): the train split is discarded — only the test split is used.
data = data_test

sampling_method = importlib.import_module(
    "src.dataset.patching.binary_distribution")

compute_patch = True
patch_size = (64, 64, 64)
batch_size = 4

print("Creating Dataset")
train_dataset = BratsDataset(data,
                             sampling_method,
                             patch_size,
                             compute_patch=compute_patch)
# NOTE(review): this call is truncated by a paste artifact — the remaining
# DataLoader keyword arguments are missing from this file.
train_loader = DataLoader(dataset=train_dataset,
    data = np.array(data)

    # Bucket patch indices per patient, separated by tumor grade so the
    # split can be stratified by grade.
    for index, patient_patch in enumerate(data):
        patient = patient_patch.patient
        grade = patient_patch.grade

        if grade == "LGG":
            add_patch(patches_by_patient_lgg, index, patient)

        elif grade == "HGG":
            add_patch(patches_by_patient_hgg, index, patient)

        else:
            print("Unknown grade")

    # Split each grade independently, then merge so train/val preserve the
    # LGG/HGG proportions of the full dataset.
    train_patients_lgg, val_patients_lgg = get_split_random(
        data, patches_by_patient_lgg, val_size)
    train, val = get_split_random(data, patches_by_patient_hgg, val_size)

    train.extend(train_patients_lgg)
    val.extend(val_patients_lgg)

    return train, val


if __name__ == "__main__":
    # Manual check: split the BraTS training set into train/val partitions.
    csv_path = "/Users/lauramora/Documents/MASTER/TFM/Data/2020/train/no_patch/brats20_data.csv"
    data, _ = dataset.read_brats(csv_path)
    train, val = train_val_split(data)
    # Report the split sizes (the original bare print() emitted nothing).
    print(f"train={len(train)} val={len(val)}")