Example 1
    def _do_job(self, dirs):
        for subject in dirs:
            if os.path.exists(
                    os.path.join(self._root_dir, subject, "mri",
                                 "aparc+aseg.mgz")):
                if not os.path.exists(
                        os.path.join(self._output_dir, subject, "mri")):
                    os.makedirs(os.path.join(self._output_dir, subject,
                                             "mri"))

                self._extract_labels(
                    os.path.join(self._root_dir, subject, "mri"),
                    os.path.join(self._output_dir, subject, "mri"))
                t1 = ToNumpyArray()(
                    os.path.join(self._root_dir, subject, "mri",
                                 "brainmask.mgz"))
                labels = ToNumpyArray()(
                    os.path.join(self._output_dir, subject, "mri",
                                 "labels.nii.gz"))
                t1, labels = self._align(t1, labels)
                t1, labels = self._crop_to_content(t1, labels)
                if self._augment:
                    t1 = self._augmentation_transforms(t1)
                if self._do_min_max_scaling:
                    t1 = self._min_max_scale(t1)
                if self._do_standardization:
                    t1 = self._normalize(t1, self._mean, self._std)
                if self._do_extract_patches:
                    self._extract_patches(t1, subject, "T1", self._patch_size,
                                          self._step)
                    self._extract_patches(labels, subject, "Labels",
                                          self._patch_size, self._step)
                else:
                    self._write_image(t1, subject, "T1")
                    self._write_image(labels, subject, "Labels")

                # Clean up the intermediate masks and the merged label volume.
                for intermediate in ("csf_mask.mgz", "labels.nii.gz",
                                     "wm_mask.mgz", "gm_mask.mgz"):
                    os.remove(
                        os.path.join(self._output_dir, subject, "mri",
                                     intermediate))
Example 2
 def __init__(self, csv_path, dataset, test_size, modalities):
     self.LOGGER = logging.getLogger("HD5FWriter")
     self._csv = pandas.read_csv(csv_path)
     self._dataset = dataset
     self._test_size = test_size
     self._modalities = modalities
     self._transform = transforms.Compose([ToNumpyArray()])
Example 3
    def _extract_labels(self, input_dir, output_dir):
        self._mri_binarize(os.path.join(input_dir, "aparc+aseg.mgz"),
                           os.path.join(output_dir, "wm_mask.mgz"), "wm")

        self._mri_binarize(os.path.join(input_dir, "aparc+aseg.mgz"),
                           os.path.join(output_dir, "gm_mask.mgz"), "gm")
        self._mri_binarize(os.path.join(input_dir, "aparc+aseg.mgz"),
                           os.path.join(output_dir, "csf_mask.mgz"), "csf")

        csf_labels = ToNumpyArray()(os.path.join(output_dir, "csf_mask.mgz"))
        gm_labels = self._remap_labels(os.path.join(output_dir, "gm_mask.mgz"),
                                       1, 2)
        wm_labels = self._remap_labels(os.path.join(output_dir, "wm_mask.mgz"),
                                       1, 3)

        merged = self._merge_volumes(gm_labels, wm_labels, csf_labels)

        transform_ = transforms.Compose([
            ToNifti1Image(),
            NiftiToDisk(os.path.join(output_dir, "labels.nii.gz"))
        ])
        transform_(merged)
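For reference, the three binarized masks are combined into a single label map (CSF=1, GM=2, WM=3), which is why GM and WM are remapped from 1 to 2 and 3 beforehand. A minimal NumPy sketch of the equivalent arithmetic, assuming non-overlapping masks (`_merge_volumes` itself is project-specific):

    import numpy as np

    csf = np.array([[1, 0], [0, 0]])  # CSF mask, label 1
    gm = np.array([[0, 2], [0, 0]])   # GM mask remapped to 2
    wm = np.array([[0, 0], [3, 0]])   # WM mask remapped to 3
    # Summing is a valid merge only because the masks do not overlap.
    merged = csf + gm + wm
    print(merged)  # [[1 2] [3 0]]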
Example 4
 def __init__(self, root_path: str, subject: str, patch_size, step):
     self._image_path = os.path.join(root_path, subject, "T1", "T1.nii.gz")
     self._image = PadToShape(target_shape=[1, 256, 256, 192])(
         ToNumpyArray()(self._image_path))
     self._slices = SliceBuilder(self._image, patch_size=patch_size, step=step).build_slices(
         keep_centered_on_foreground=True)
     self._image_max = self._image.max()
     self._patch_size = patch_size
     self._step = step
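Once the slices are built, patch extraction is plain NumPy indexing. A hypothetical sketch of what `SliceBuilder` yields, assuming each entry is a tuple of `slice` objects covering one patch:

    import numpy as np

    image = np.zeros((1, 256, 256, 192), dtype=np.float32)
    # Hypothetical stand-in for SliceBuilder with non-overlapping 32^3 patches.
    slices = [(slice(0, 1), slice(z, z + 32), slice(y, y + 32), slice(x, x + 32))
              for z in range(0, 225, 32)
              for y in range(0, 225, 32)
              for x in range(0, 161, 32)]
    patches = [image[s] for s in slices]
    print(len(patches), patches[0].shape)  # 384 (1, 32, 32, 32)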
Example 5
 def __init__(self,
              root_dir: str,
              output_dir: str,
              scaler: Callable = None,
              params: dict = None):
     self._root_dir = root_dir
     self._output_dir = output_dir
     self._transforms = transforms.Compose([ToNumpyArray()])
     self._scaler = scaler
     self._params = params
Example 6
 def setUp(self) -> None:
     paths = extract_file_paths(self.PATH)
     self._dataset = MRBrainSSegmentationFactory.create(
         natural_sort(paths), None, modalities=Modality.T1, dataset_id=0)
     self._reconstructor = ImageReconstructor([256, 256, 192],
                                              [1, 32, 32, 32], [1, 8, 8, 8])
     transforms = Compose(
         [ToNumpyArray(),
          PadToPatchShape([1, 32, 32, 32], [1, 8, 8, 8])])
     self._full_image = transforms(self.FULL_IMAGE_PATH)
Example 7
    def compute_normalized_shape_from_images_in(self, root_dir_1, root_dir_2):
        image_shapes_iSEG = []
        image_shapes_MRBrainS = []

        for root, dirs, files in os.walk(root_dir_1):
            for file in list(filter(lambda path: Image.is_nifti(path), files)):
                try:
                    self.LOGGER.debug(
                        "Computing the bounding box of {}".format(file))
                    c, d_min, d_max, h_min, h_max, w_min, w_max = CropToContent.extract_content_bounding_box_from(
                        ToNumpyArray()(os.path.join(root, file)))
                    image_shapes_iSEG.append(
                        (c, d_max - d_min, h_max - h_min, w_max - w_min))
                except Exception as e:
                    self.LOGGER.warning(
                        "Error while computing the content bounding box for {} with error {}"
                        .format(file, e))

        for root, dirs, files in os.walk(root_dir_2):
            for file in list(filter(lambda path: Image.is_nifti(path), files)):
                try:
                    self.LOGGER.debug(
                        "Computing the bounding box of {}".format(file))
                    c, d_min, d_max, h_min, h_max, w_min, w_max = CropToContent.extract_content_bounding_box_from(
                        ToNumpyArray()(os.path.join(root, file)))
                    image_shapes_MRBrainS.append(
                        (c, d_max - d_min, h_max - h_min, w_max - w_min))
                except Exception as e:
                    self.LOGGER.warning(
                        "Error while computing the content bounding box for {} with error {}"
                        .format(file, e))

        c, d, h, w = reduce(
            lambda a, b:
            (a[0], max(a[1], b[1]), max(a[2], b[2]), max(a[3], b[3])),
            image_shapes_iSEG)
        c_2, d_2, h_2, w_2 = reduce(
            lambda a, b:
            (a[0], max(a[1], b[1]), max(a[2], b[2]), max(a[3], b[3])),
            image_shapes_MRBrainS)
        return max(c, c_2), max(d, d_2), max(h, h_2), max(w, w_2)
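Each `reduce` keeps the channel count of the first shape and takes the element-wise maximum of the spatial extents, yielding the smallest shape that contains every cropped volume. The reduction in isolation:

    from functools import reduce

    shapes = [(1, 120, 140, 100), (1, 130, 135, 110), (1, 125, 150, 90)]
    c, d, h, w = reduce(
        lambda a, b: (a[0], max(a[1], b[1]), max(a[2], b[2]), max(a[3], b[3])),
        shapes)
    print(c, d, h, w)  # 1 130 150 110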
Example 8
 def __init__(self,
              root_dir_iseg: str,
              root_dir_mrbrains: str,
              output_dir: str,
              params: dict = None):
     self._root_dir_iseg = root_dir_iseg
     self._root_dir_mrbrains = root_dir_mrbrains
     self._output_dir = output_dir
     self._normalized_shape = self.compute_normalized_shape_from_images_in(
         self._root_dir_iseg, self._root_dir_mrbrains)
     self._transforms = transforms.Compose([
         ToNumpyArray(),
         CropToContent(),
         PadToShape(self._normalized_shape)
     ])
     self._params = params
Example 9
 def __init__(self,
              image_size: List[int],
              patch_size: List[int],
              step: List[int],
              models: List[torch.nn.Module] = None,
              normalize: bool = False,
              segment: bool = False,
              normalize_and_segment: bool = False,
              test_image: np.ndarray = None):
     self._patch_size = patch_size
     self._image_size = image_size
     self._step = step
     self._models = models
     self._do_normalize = normalize
     self._do_segment = segment
     self._do_normalize_and_segment = normalize_and_segment
     self._transform = Compose([ToNumpyArray()])
     self._test_image = test_image
Example 10
    def run(self, output_filename: str):
        source_paths = list()
        target_paths = list()
        subjects = list()
        sites = list()
        for subject_dir in sorted(os.listdir(self._source_dir)):
            source_paths_ = extract_file_paths(
                os.path.join(self._source_dir, subject_dir, "mri", "T1"),
                "T1.nii.gz")
            target_paths_ = extract_file_paths(
                os.path.join(self._source_dir, subject_dir, "mri", "Labels"),
                "Labels.nii.gz")
            source_paths.append(source_paths_)
            target_paths.append(target_paths_)
            if len(source_paths_) != 0:
                match = re.search('(?P<site>.*)_(?P<patient_id>[0-9]*)',
                                  str(subject_dir))
                sites.append(match.group("site"))
                subjects.append(subject_dir)

        source_paths = list(filter(None, source_paths))
        target_paths = list(filter(None, target_paths))

        with open(os.path.join(self._output_dir, output_filename),
                  mode='a+') as output_file:
            writer = csv.writer(output_file,
                                delimiter=',',
                                quotechar='"',
                                quoting=csv.QUOTE_MINIMAL)
            writer.writerow([
                "T1", "labels", "subject", "site", "min", "max", "mean", "std"
            ])

            for source_path, target_path, subject, site in zip(
                    source_paths, target_paths, subjects, sites):
                self.LOGGER.info("Processing file {}".format(source_path))

                image = ToNumpyArray()(source_path[0])
                csv_data = np.vstack(
                    (source_path, target_path, subject, site, (image.min()),
                     (image.max()), (image.mean()), (image.std())))

                for item in range(csv_data.shape[1]):
                    writer.writerow([
                        csv_data[0][item], csv_data[1][item],
                        csv_data[2][item], csv_data[3][item],
                        csv_data[4][item], csv_data[5][item],
                        csv_data[6][item], csv_data[7][item]
                    ])
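The site/subject split relies on directories being named `<site>_<patient_id>`, which the regex captures as two groups. With a hypothetical directory name:

    import re

    match = re.search(r'(?P<site>.*)_(?P<patient_id>[0-9]*)', "NYU_0050952")
    print(match.group("site"), match.group("patient_id"))  # NYU 0050952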
Example 11
    def __init__(self,
                 images: List[str],
                 patch_size: Tuple[int, int, int, int],
                 reconstructed_image_size: Tuple[int, int, int, int],
                 step: Tuple[int, int, int, int],
                 batch_size: int = 5,
                 models: List[torch.nn.Module] = None,
                 normalize: bool = False,
                 is_ground_truth: bool = False,
                 normalize_and_segment: bool = False,
                 is_multimodal=False,
                 alpha=0.0,
                 prob_bias=0.0,
                 snr=0.0,
                 prob_noise=0.0):
        self._patch_size = patch_size
        self._reconstructed_image_size = reconstructed_image_size
        self._step = step
        self._models = models
        self._do_normalize = normalize
        self._is_ground_truth = is_ground_truth
        self._do_normalize_and_segment = normalize_and_segment
        self._is_multimodal = is_multimodal
        self._batch_size = batch_size
        self._alpha = alpha
        self._snr = snr
        self._prob_bias = prob_bias
        self._prob_noise = prob_noise

        transformed_images = []
        for image in images:
            transform = Compose([
                ToNumpyArray(),
                PadToShape(target_shape=self._reconstructed_image_size)
            ])
            transformed_images.append(transform(image))

        self._images = transformed_images

        self._overlap_maps = list(
            map(
                lambda image: SliceBuilder(image, self._patch_size, self._step)
                .build_overlap_map(), self._images))
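The overlap map records how many patches cover each voxel, so that summed patch predictions can later be averaged during reconstruction. A hedged NumPy sketch of the idea (the real `build_overlap_map` may differ in detail):

    import itertools

    import numpy as np

    def build_overlap_map_sketch(image_shape, patch_size, step):
        # Count, per voxel, how many sliding patches cover it.
        counts = np.zeros(image_shape, dtype=np.float32)
        axes = [range(0, s - p + 1, st)
                for s, p, st in zip(image_shape, patch_size, step)]
        for origin in itertools.product(*axes):
            region = tuple(slice(o, o + p) for o, p in zip(origin, patch_size))
            counts[region] += 1
        # Multiply summed patch predictions by this map to average them.
        return 1.0 / counts

    overlap = build_overlap_map_sketch((1, 64, 64, 64), (1, 32, 32, 32),
                                       (1, 16, 16, 16))
    print(overlap.min(), overlap.max())  # 0.125 1.0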
Example 12
 def setUp(self) -> None:
     transforms = Compose(
         [ToNumpyArray(),
          PadToPatchShape((1, 32, 32, 32), (1, 8, 8, 8))])
     self._image = transforms(self.FULL_IMAGE_PATH)
     self._target = transforms(self.TARGET_PATH)
     patches = iSEGSliceDatasetFactory.get_patches([self._image],
                                                   [self._target],
                                                   (1, 32, 32, 32),
                                                   (1, 16, 16, 16))
     self._dataset = iSEGSliceDatasetFactory.create(
         [self._image], [self._target],
         patches,
         Modality.T1,
         0,
         transforms=[ToNDTensor()])
     self._reconstructor = ImageReconstructor([256, 192, 160],
                                              [1, 32, 32, 32],
                                              [1, 16, 16, 16],
                                              models=None,
                                              test_image=self._image)
Example 13
    def _to_numpy_array(self, file):
        transform_ = transforms.Compose([ToNumpyArray()])

        return transform_(file)
Example 14
 def __init__(self, root_dir: str):
     self._root_dir = root_dir
     self._transforms = Compose([ToNumpyArray()])
Example 15
    def run(self, prefix="standardize_"):
        images_np = list()
        headers = list()
        file_names = list()
        root_dirs = list()
        root_dirs_number = list()
        EXCLUDED = ["ROI", "label", "Normalized"]

        for root, dirs, files in os.walk(self._root_dir_mrbrains):
            root_dir_number = os.path.basename(os.path.normpath(root))
            images = list(filter(re.compile(r"^T.*\.nii").search, files))
            for file in images:
                try:
                    self.LOGGER.info("Processing: {}".format(file))
                    file_names.append(file)
                    root_dirs.append(root)
                    root_dirs_number.append(root_dir_number)
                    images_np.append(self._transforms(os.path.join(root,
                                                                   file)))
                    headers.append(
                        self._get_image_header(os.path.join(root, file)))

                except Exception as e:
                    self.LOGGER.warning(e)

        for root, dirs, files in os.walk(self._root_dir_iseg):
            if os.path.basename(os.path.normpath(root)) in EXCLUDED:
                continue

            root_dir_number = os.path.basename(os.path.normpath(root))
            images = list(filter(re.compile(r".*T.*\.nii").search, files))

            for file in images:
                try:
                    self.LOGGER.info("Processing: {}".format(file))
                    file_names.append(file)
                    root_dirs.append(root)
                    root_dirs_number.append(root_dir_number)
                    images_np.append(self._transforms(os.path.join(root,
                                                                   file)))
                    headers.append(
                        self._get_image_header(os.path.join(root, file)))

                except Exception as e:
                    self.LOGGER.warning(e)

        images = np.array(images_np).astype(np.float32)
        transformed_images = np.subtract(images,
                                         np.mean(images)) / np.std(images)

        for i in range(transformed_images.shape[0]):
            if "MRBrainS" in root_dirs[i]:
                dataset = "MRBrainS"
            elif "iSEG" in root_dirs[i]:
                dataset = "iSEG"
            else:
                continue

            root_dir_number = os.path.basename(os.path.normpath(root_dirs[i]))
            output_path = os.path.join(self._output_dir, dataset,
                                       "Dual_Standardized", root_dir_number)
            if not os.path.exists(output_path):
                os.makedirs(output_path)

            transforms_ = transforms.Compose([
                ToNifti1Image(),
                NiftiToDisk(os.path.join(output_path,
                                         prefix + file_names[i]))
            ])
            transforms_(transformed_images[i])

        for root, dirs, files in os.walk(self._root_dir_mrbrains):
            root_dir_end = os.path.basename(os.path.normpath(root))
            output_path = os.path.join(self._output_dir, "MRBrainS",
                                       "Dual_Standardized", root_dir_end)

            images = list(
                filter(re.compile(r"^LabelsFor.*\.nii").search, files))

            for file in images:
                if not os.path.exists(output_path):
                    os.makedirs(output_path)

                transforms_ = transforms.Compose([
                    ToNumpyArray(),
                    CropToContent(),
                    PadToShape(self._normalized_shape),
                    ToNifti1Image(),
                    NiftiToDisk(os.path.join(output_path, file))
                ])
                transforms_(os.path.join(root, file))

        for root, dirs, files in os.walk(self._root_dir_iseg):
            root_dir_end = os.path.basename(os.path.normpath(root))
            if "ROI" in root_dir_end or "label" in root_dir_end:
                output_path = os.path.join(self._output_dir, "iSEG",
                                           "Dual_Standardized", root_dir_end)
                for file in files:
                    if not os.path.exists(output_path):
                        os.makedirs(output_path)
                    transforms_ = transforms.Compose([
                        ToNumpyArray(),
                        CropToContent(),
                        PadToShape(self._normalized_shape),
                        ToNifti1Image(),
                        NiftiToDisk(os.path.join(output_path, file))
                    ])
                    transforms_(os.path.join(root, file))
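The standardization in this run is a single z-score computed over both datasets pooled together, which puts iSEG and MRBrainS on one shared intensity scale. The core operation in isolation:

    import numpy as np

    # Pooled volumes from both datasets, stacked along the first axis.
    images = np.random.rand(10, 1, 32, 32, 32).astype(np.float32)
    # One global mean/std across the whole pool, not per-image statistics.
    standardized = (images - np.mean(images)) / np.std(images)
    print(standardized.mean(), standardized.std())  # ~0.0, ~1.0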
Example 16
 @staticmethod
 def _remap_labels(input_path, old_value, remapped_value):
     remap_transform = transforms.Compose(
         [ToNumpyArray(),
          RemapClassIDs([old_value], [remapped_value])])
     return remap_transform(input_path)
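Functionally, `RemapClassIDs` rewrites matching voxel values; an equivalent NumPy expression for a single ID, assuming integer masks:

    import numpy as np

    mask = np.array([0, 1, 1, 0])  # binary GM mask
    # old_value=1 -> remapped_value=2, everything else untouched.
    remapped = np.where(mask == 1, 2, mask)
    print(remapped)  # [0 2 2 0]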
Example 17
    return np.sqrt(js_div)


if __name__ == '__main__':
    model = Unet(1, 1, True, True)

    iseg_csv = "/mnt/md0/Data/iSEG_scaled/Training/output_iseg_images.csv"
    mrbrains_csv = "/mnt/md0/Data/MRBrainS_scaled/DataNii/TrainingData/output_mrbrains_images.csv"
    abide_csv = "/mnt/md0/Data/ABIDE_scaled/output_abide_images.csv"
    iseg_csv = pandas.read_csv(iseg_csv)
    mrbrains_csv = pandas.read_csv(mrbrains_csv)
    abide_csv = pandas.read_csv(abide_csv).sample(75)

    c, d, h, w = 1, 256, 256, 192

    transform = transforms.Compose([ToNumpyArray(), PadToShape((c, d, h, w))])
    iseg_inputs = torch.tensor([transform(image) for image in iseg_csv["T1"]])
    mrbrains_inputs = torch.tensor(
        [transform(image) for image in mrbrains_csv["T1"]])
    abide_inputs = torch.tensor(
        [transform(image) for image in abide_csv["T1"]])
    generated_iseg = transform(
        "/mnt/md0/Research/DualUNet/Reconstructed_Normalized_iSEG_Image_80.nii.gz"
    )
    generated_mrbrains = transform(
        "/mnt/md0/Research/DualUNet/Reconstructed_Normalized_MRBrainS_Image_80.nii.gz"
    )
    generated_abide = transform(
        "/mnt/md0/Research/DualUNet/Reconstructed_Normalized_ABIDE_Image_80.nii.gz"
    )
    segmentation_iseg = transform(
Example 18
    def run(self, output_filename: str):
        source_paths_t1_1mm = list()
        source_paths_t2 = list()
        source_paths_t1 = list()
        source_paths_t1_ir = list()
        target_paths = list()
        target_paths_training = list()

        for subject in sorted(os.listdir(os.path.join(self._source_dir))):
            source_paths_t2.append(
                extract_file_paths(
                    os.path.join(self._source_dir, subject, "T2_FLAIR")))
            source_paths_t1_ir.append(
                extract_file_paths(
                    os.path.join(self._source_dir, subject, "T1_IR")))
            source_paths_t1_1mm.append(
                extract_file_paths(
                    os.path.join(self._source_dir, subject, "T1_1mm")))
            source_paths_t1.append(
                extract_file_paths(
                    os.path.join(self._source_dir, subject, "T1")))
            target_paths.append(
                extract_file_paths(
                    os.path.join(self._source_dir, subject,
                                 "LabelsForTesting")))
            target_paths_training.append(
                extract_file_paths(
                    os.path.join(self._source_dir, subject,
                                 "LabelsForTraining")))

        subjects = np.arange(1, 6)

        with open(os.path.join(self._output_dir, output_filename),
                  mode='a+') as output_file:
            writer = csv.writer(output_file,
                                delimiter=',',
                                quotechar='"',
                                quoting=csv.QUOTE_MINIMAL)
            writer.writerow([
                "T1_1mm", "T1", "T1_IR", "T2_FLAIR", "LabelsForTesting",
                "LabelsForTraining", "subject", "T1_min", "T1_max", "T1_mean",
                "T1_std", "T2_min", "T2_max", "T2_mean", "T2_std"
            ])

            for source_path_t2, source_path_t1_ir, source_path_t1_1mm, source_path_t1, target_path, target_path_training, subject in zip(
                    source_paths_t2, source_paths_t1_ir, source_paths_t1_1mm,
                    source_paths_t1, target_paths, target_paths_training,
                    subjects):
                self.LOGGER.info("Processing file {}".format(source_path_t1))

                t1 = ToNumpyArray()(source_path_t1[0])
                t2 = ToNumpyArray()(source_path_t2[0])
                csv_data = np.vstack(
                    (source_path_t1_1mm, source_path_t1, source_path_t1_ir,
                     source_path_t2, target_path, target_path_training,
                     subject, str(t1.min()), str(t1.max()), str(t1.mean()),
                     str(t1.std()), str(t2.min()), str(t2.max()),
                     str(t2.mean()), str(t2.std())))

                for item in range(csv_data.shape[1]):
                    writer.writerow([
                        csv_data[0][item], csv_data[1][item],
                        csv_data[2][item], csv_data[3][item],
                        csv_data[4][item], csv_data[5][item],
                        csv_data[6][item], csv_data[7][item],
                        csv_data[8][item], csv_data[9][item],
                        csv_data[10][item], csv_data[11][item],
                        csv_data[12][item], csv_data[13][item],
                        csv_data[14][item]
                    ])
Example 19
 def __init__(self, root_dir: str, output_dir: str):
     self._source_dir = root_dir
     self._output_dir = output_dir
     self._transforms = Compose([ToNumpyArray()])