Example #1
    def run(self):
        # Pair each T1 image with its corresponding label file, keeping the
        # two lists aligned by natural sort order.
        images_T1 = natural_sort(extract_file_paths(os.path.join(self._root_dir, "T1")))
        labels = natural_sort(extract_file_paths(os.path.join(self._root_dir, "label")))
        files = np.stack((np.array(images_T1), np.array(labels)), axis=1)

        # Run the job on the paired files; the parallel dispatch variant is
        # left commented out.
        # self._dispatch_jobs(files, 8)
        self._do_job(files)
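The examples on this page rely on two helpers, extract_file_paths and natural_sort, whose implementations are not shown here. A minimal sketch of what they might look like, assuming extract_file_paths recursively collects file paths below a directory (optionally filtered by a filename pattern, as in Example #4) and natural_sort orders paths by their embedded numbers; the names come from the snippets, the bodies are assumptions:

    import os
    import re


    def extract_file_paths(root_dir, pattern=None):
        # Hypothetical helper: walk root_dir and collect every file path,
        # optionally keeping only file names that contain `pattern`.
        paths = []
        for dirpath, _, filenames in os.walk(root_dir):
            for filename in filenames:
                if pattern is None or pattern in filename:
                    paths.append(os.path.join(dirpath, filename))
        return paths


    def natural_sort(paths):
        # Hypothetical helper: sort "img2" before "img10" by comparing the
        # numeric chunks of each path as integers.
        def key(path):
            return [int(chunk) if chunk.isdigit() else chunk.lower()
                    for chunk in re.split(r'(\d+)', path)]
        return sorted(paths, key=key)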
Example #2
    def run(self, output_filename: str):

        source_paths_t2 = list()
        source_paths_t1 = list()
        target_paths = list()

        for subject in sorted(os.listdir(os.path.join(self._source_dir))):
            source_paths_t1.append(
                extract_file_paths(
                    os.path.join(self._source_dir, subject, "T1")))
            source_paths_t2.append(
                extract_file_paths(
                    os.path.join(self._source_dir, subject, "T2")))
            target_paths.append(
                extract_file_paths(
                    os.path.join(self._source_dir, subject, "Labels")))

        # Subject identifiers 1 through 10, matched positionally with the
        # sorted file lists when the rows are written below.
        subjects = np.arange(1, 11)
        source_paths_t1 = natural_sort(
            [item for sublist in source_paths_t1 for item in sublist])
        source_paths_t2 = natural_sort(
            [item for sublist in source_paths_t2 for item in sublist])
        target_paths = natural_sort(
            [item for sublist in target_paths for item in sublist])

        with open(os.path.join(self._output_dir, output_filename),
                  mode='a+') as output_file:
            writer = csv.writer(output_file,
                                delimiter=',',
                                quotechar='"',
                                quoting=csv.QUOTE_MINIMAL)
            writer.writerow([
                "T1", "T2", "labels", "subject", "T1_min", "T1_max", "T1_mean",
                "T1_std", "T2_min", "T2_max", "T2_mean", "T2_std"
            ])

            for source_path, source_path_t2, target_path, subject in zip(
                    source_paths_t1, source_paths_t2, target_paths, subjects):
                self.LOGGER.info("Processing file {}".format(source_path))

                t1 = ToNumpyArray()(source_path)
                t2 = ToNumpyArray()(source_path_t2)

                csv_data = np.vstack((source_path, source_path_t2, target_path,
                                      subject, str(t1.min()), str(t1.max()),
                                      str(t1.mean()), str(t1.std()),
                                      str(t2.min()), str(t2.max()),
                                      str(t2.mean()), str(t2.std())))

                # csv_data has shape (12, 1), so this inner loop writes a
                # single row for the current subject.
                for item in range(csv_data.shape[1]):
                    writer.writerow([
                        csv_data[0][item], csv_data[1][item],
                        csv_data[2][item], csv_data[3][item],
                        csv_data[4][item], csv_data[5][item],
                        csv_data[6][item], csv_data[7][item],
                        csv_data[8][item], csv_data[9][item],
                        csv_data[10][item], csv_data[11][item]
                    ])
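ToNumpyArray()(source_path) is used as a callable transform that loads a medical image from disk and returns it as a NumPy array. A minimal sketch of such a transform, assuming NIfTI inputs and the nibabel library; the class name comes from the snippets, the body is an assumption:

    import nibabel as nib
    import numpy as np


    class ToNumpyArray(object):
        # Hypothetical transform: load a NIfTI image and return its voxel
        # data as a float32 NumPy array.
        def __call__(self, path):
            image = nib.load(path)
            return np.asarray(image.get_fdata(), dtype=np.float32)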
Example #3
    def run(self, prefix: str = ""):
        images_T1 = natural_sort(
            extract_file_paths(os.path.join(self._root_dirs["iSEG"], "T1")))
        labels = natural_sort(
            extract_file_paths(os.path.join(self._root_dirs["iSEG"], "label")))
        files = np.stack((np.array(images_T1), np.array(labels)), axis=1)

        # Dataset-level statistics for iSEG, taken as the mean of the
        # per-chunk values returned by the worker pool.
        self._dataset_mean_iSEG = np.mean(
            self._dispatch_jobs_in_pool(files, 5, self._get_mean_iseg))
        self._dataset_std_iSEG = np.mean(
            self._dispatch_jobs_in_pool(files, 5, self._get_std_iseg))

        files = list()
        for subject in sorted(
                os.listdir(os.path.join(self._root_dirs["MRBrainS"]))):
            files.append(
                extract_file_paths(
                    os.path.join(self._root_dirs["MRBrainS"], subject)))

        self._dataset_mean_MRBrainS = np.mean(
            self._dispatch_jobs_in_pool(files, 5, self._get_mean_mrbrains))
        self._dataset_std_MRBrainS = np.mean(
            self._dispatch_jobs_in_pool(files, 5, self._get_std_mrbrains))

        # The ABIDE data is described by a CSV file with "T1" and "labels"
        # path columns.
        files = pandas.read_csv(self._root_dirs["ABIDE"])
        images_T1 = np.asarray(files["T1"])
        labels = np.asarray(files["labels"])
        files = np.stack((np.array(images_T1), np.array(labels)), axis=1)

        self._dataset_mean_ABIDE = np.mean(
            self._dispatch_jobs_in_pool(files, 8, self._get_mean_abide))
        self._dataset_std_ABIDE = np.mean(
            self._dispatch_jobs_in_pool(files, 8, self._get_std_abide))

        print("Triple Dataset mean: {}".format(
            np.mean([
                self._dataset_mean_iSEG, self._dataset_mean_MRBrainS,
                self._dataset_mean_ABIDE
            ])))
        print("Triple Dataset std: {}".format(
            np.sqrt(
                np.mean([
                    self._dataset_std_iSEG, self._dataset_std_MRBrainS,
                    self._dataset_std_ABIDE
                ]))))

        print("Dual Dataset mean: {}".format(
            np.mean([self._dataset_mean_iSEG, self._dataset_mean_MRBrainS])))
        print("Dual Dataset std: {}".format(
            np.sqrt(
                np.mean([self._dataset_std_iSEG,
                         self._dataset_std_MRBrainS]))))
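_dispatch_jobs_in_pool(files, n, job) apparently splits the file list across n worker processes and gathers the per-chunk statistics that the surrounding code then averages with np.mean. A minimal sketch of such a dispatcher, assuming a multiprocessing.Pool over roughly equal chunks; the method name comes from the snippet, the body is an assumption:

    import math
    import multiprocessing

    import numpy as np


    def dispatch_jobs_in_pool(files, num_workers, job):
        # Hypothetical dispatcher: map `job` over roughly equal chunks of the
        # file list in a process pool and return the per-chunk results.
        chunk_size = math.ceil(len(files) / num_workers)
        chunks = [files[i:i + chunk_size]
                  for i in range(0, len(files), chunk_size)]
        with multiprocessing.Pool(processes=num_workers) as pool:
            results = pool.map(job, chunks)
        return np.asarray(results)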
Example #4
    def run(self, output_filename: str):
        source_paths = list()
        target_paths = list()
        subjects = list()
        sites = list()
        for dir in sorted(os.listdir(self._source_dir)):
            source_paths_ = extract_file_paths(
                os.path.join(self._source_dir, dir, "mri", "T1"), "T1.nii.gz")
            target_paths_ = extract_file_paths(
                os.path.join(self._source_dir, dir, "mri", "Labels"),
                "Labels.nii.gz")
            subject_ = dir
            source_paths.append(source_paths_)
            target_paths.append(target_paths_)
            # Only keep subject and site entries for directories that
            # actually contain a T1 image.
            if len(source_paths_) != 0:
                match = re.search('(?P<site>.*)_(?P<patient_id>[0-9]*)',
                                  str(dir))
                site_ = match.group("site")
                sites.append(site_)
                subjects.append(subject_)

        # Drop empty entries left by directories without T1 or label files.
        source_paths = list(filter(None, source_paths))
        target_paths = list(filter(None, target_paths))

        with open(os.path.join(self._output_dir, output_filename),
                  mode='a+') as output_file:
            writer = csv.writer(output_file,
                                delimiter=',',
                                quotechar='"',
                                quoting=csv.QUOTE_MINIMAL)
            writer.writerow([
                "T1", "labels", "subject", "site", "min", "max", "mean", "std"
            ])

            for source_path, target_path, subject, site in zip(
                    source_paths, target_paths, subjects, sites):
                self.LOGGER.info("Processing file {}".format(source_path))

                image = ToNumpyArray()(source_path[0])
                csv_data = np.vstack(
                    (source_path, target_path, subject, site, (image.min()),
                     (image.max()), (image.mean()), (image.std())))

                for item in range(csv_data.shape[1]):
                    writer.writerow([
                        csv_data[0][item], csv_data[1][item],
                        csv_data[2][item], csv_data[3][item],
                        csv_data[4][item], csv_data[5][item],
                        csv_data[6][item], csv_data[7][item]
                    ])
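The regular expression in Example #4 splits an ABIDE-style directory name of the form "<site>_<patient_id>" into its two parts. A short standalone check of that pattern (the directory name used here is only an illustration):

    import re

    match = re.search('(?P<site>.*)_(?P<patient_id>[0-9]*)', "SiteA_0051456")
    print(match.group("site"))        # SiteA
    print(match.group("patient_id"))  # 0051456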
Example #5
    def run(self):
        source_paths = list()

        for subject in sorted(os.listdir(os.path.join(self._root_dir))):
            source_paths.append(extract_file_paths(os.path.join(self._root_dir, subject)))

        self._dispatch_jobs(source_paths, 8)
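Example #5 hands the per-subject path lists to self._dispatch_jobs(source_paths, 8) without collecting any return value. A minimal sketch of such a fire-and-forget dispatcher, assuming it starts one multiprocessing.Process per chunk and waits for all of them; the method name comes from the snippet, the body is an assumption:

    import math
    import multiprocessing


    def dispatch_jobs(files, num_workers, job):
        # Hypothetical dispatcher: split the file list into roughly equal
        # chunks, run `job` on each chunk in its own process, and wait for
        # every worker to finish.
        chunk_size = math.ceil(len(files) / num_workers)
        chunks = [files[i:i + chunk_size]
                  for i in range(0, len(files), chunk_size)]
        workers = [multiprocessing.Process(target=job, args=(chunk,))
                   for chunk in chunks]
        for worker in workers:
            worker.start()
        for worker in workers:
            worker.join()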
Example #6
    def setUp(self) -> None:
        paths = extract_file_paths(self.PATH)
        self._dataset = MRBrainSSegmentationFactory.create(
            natural_sort(paths), None, modalities=Modality.T1, dataset_id=0)
        self._reconstructor = ImageReconstructor([256, 256, 192],
                                                 [1, 32, 32, 32], [1, 8, 8, 8])
        transforms = Compose(
            [ToNumpyArray(),
             PadToPatchShape([1, 32, 32, 32], [1, 8, 8, 8])])
        self._full_image = transforms(self.FULL_IMAGE_PATH)