Example #1
0
    def __init__(self,
                 data_split_csv,
                 train_preprocessings,
                 valid_preprocessings,
                 transforms,
                 to_tensor,
                 augments=None,
                 **kwargs):
        """Set up the dataset and gather data paths from the split csv.

        Args:
            data_split_csv (str): Path of the csv file with (path, split) rows.
            train_preprocessings: Preprocessing ops for the training split.
            valid_preprocessings: Preprocessing ops for the validation split.
            transforms: Ops applied to both splits.
            to_tensor: Accepted but not used; a fresh ToTensor() is created
                instead.  NOTE(review): confirm this is intended.
            augments: Optional augmentation ops.
        """
        super().__init__(**kwargs)
        self.data_split_csv = data_split_csv
        self.train_preprocessings = Compose.compose(train_preprocessings)
        self.valid_preprocessings = Compose.compose(valid_preprocessings)
        self.transforms = Compose.compose(transforms)
        self.to_tensor = ToTensor()
        self.augments = Compose.compose(augments)

        # Keep only the csv rows whose split column matches this dataset's type.
        split_name = 'Training' if self.type == 'train' else 'Validation'
        with open(self.data_split_csv, "r") as csv_file:
            self.data_paths = [
                row_path for row_path, row_split in csv.reader(csv_file)
                if row_split == split_name
            ]
Example #2
0
 def __init__(self, data_split_file_path, preprocess, transforms, **kwargs):
     """Set up the dataset from a csv split file.

     Args:
         data_split_file_path (str): Csv with `type` and `path` columns.
         preprocess: Preprocessing ops.
         transforms: Transform ops.
     """
     super().__init__(**kwargs)
     split_table = pd.read_csv(data_split_file_path)
     # Rows whose `type` column matches this dataset's type.
     matched_rows = split_table[split_table.type == self.type]
     self.data_paths = [Path(raw_path) for raw_path in matched_rows.path]
     self.preprocess = Compose.compose(preprocess)
     self.transforms = Compose.compose(transforms)
     self.to_tensor = ToTensor()
Example #3
0
    def __init__(self, data_split_file_path, preprocess, transforms, **kwargs):
        """Set up the dataset from a csv split file.

        Args:
            data_split_file_path (str): Csv with `type` and `path` columns.
            preprocess: Preprocessing ops.
            transforms: Transform ops.
        """
        super().__init__(**kwargs)
        if self.type == 'train':
            # Training uses both the 'train' and the 'valid' rows of the csv.
            split_table = pd.read_csv(data_split_file_path)
            selected = split_table[split_table.type.isin(('train', 'valid'))]
            image_paths = []
            for raw_path in selected.path:
                image_paths.extend(sorted(Path(raw_path).glob('*_img.nii.gz')))
            self.data_paths = tuple(image_paths)
        elif self.type == 'valid':
            # Placeholder entry — validation paths are not read from the csv.
            # NOTE(review): any other self.type leaves data_paths unset; confirm
            # that callers only use 'train' and 'valid'.
            self.data_paths = ('nan',)

        self.preprocess = Compose.compose(preprocess)
        self.transforms = Compose.compose(transforms)
        self.to_tensor = ToTensor()
 def __init__(self,
              data_split_file_path,
              transforms,
              augments=None,
              **kwargs):
     """Pair every frame image with its ground-truth volume.

     Args:
         data_split_file_path (str): Csv with `type` and `path` columns.
         transforms: Transform ops.
         augments: Optional augmentation ops.
     """
     super().__init__(**kwargs)
     split_table = pd.read_csv(data_split_file_path)
     matched_rows = split_table[split_table.type == self.type]
     # (image, ground truth) pairs, matched by the sorted glob order.
     pairs = []
     for raw_path in matched_rows.path:
         patient_dir = Path(raw_path)
         images = sorted(patient_dir.glob('**/*frame??.nii.gz'))
         labels = sorted(patient_dir.glob('**/*frame??_gt.nii.gz'))
         pairs.extend(zip(images, labels))
     self.data_paths = tuple(pairs)
     self.transforms = Compose.compose(transforms)
     self.augments = Compose.compose(augments)
     self.to_tensor = ToTensor()
Example #5
0
 def __init__(self,
              data_split_file_path,
              transforms,
              augments=None,
              **kwargs):
     """Pair frame images with labels (or with themselves for the test csv).

     Args:
         data_split_file_path (str): Csv with `type` and `path` columns.
         transforms: Transform ops.
         augments: Optional augmentation ops.
     """
     super().__init__(**kwargs)
     split_table = pd.read_csv(data_split_file_path)
     self.csv_name = Path(data_split_file_path).name
     # The testing csv has no ground truth, so each image is paired with itself.
     label_pattern = ('**/*frame??.nii.gz' if self.csv_name == 'testing.csv'
                      else '**/*frame??_gt.nii.gz')
     pairs = []
     for raw_path in split_table[split_table.type == self.type].path:
         patient_dir = Path(raw_path)
         images = sorted(patient_dir.glob('**/*frame??.nii.gz'))
         labels = sorted(patient_dir.glob(label_pattern))
         pairs.extend(zip(images, labels))
     self.data_paths = tuple(pairs)
     self.transforms = Compose.compose(transforms)
     self.augments = Compose.compose(augments)
     self.to_tensor = ToTensor()
    def __init__(self,
                 data_dir,
                 csv_name,
                 chosen_index,
                 chosen_label,
                 transforms,
                 augments=None,
                 **kwargs):
        """Collect (path, label) pairs for the chosen class folders.

        Args:
            data_dir (Path): Root directory with `train` and `test` subfolders.
            csv_name (str): Per-class csv listing (path, split-type) rows.
            chosen_index: Indices into the sorted class folder list.
            chosen_label: Labels assigned to the corresponding chosen folders.
            transforms: Transform ops.
            augments: Optional augmentation ops.
        """
        super().__init__(**kwargs)

        self.data_dir = data_dir
        self.csv_name = csv_name
        self.chosen_index = chosen_index
        self.chosen_label = chosen_label
        self.transforms = Compose.compose(transforms)
        self.augments = Compose.compose(augments)
        self.to_tensor = ToTensor()

        # Training/validation data live under 'train'; test data under 'test'.
        is_testing = self.type == 'Testing'
        self.data_dir = self.data_dir / Path('test' if is_testing else 'train')
        self.class_folder_path = sorted(
            child for child in self.data_dir.iterdir() if child.is_dir())
        self.data_paths = []

        for i, idx in enumerate(self.chosen_index):
            label = self.chosen_label[i]
            folder_path = self.class_folder_path[idx]
            if is_testing:
                # No csv for the test split — take every .npy in the folder.
                self.data_paths.extend(
                    (npy_path, label)
                    for npy_path in folder_path.glob('*.npy'))
            else:
                with open(str(folder_path / csv_name), 'r',
                          newline='') as csvfile:
                    self.data_paths.extend(
                        (row_path, label)
                        for row_path, row_type in csv.reader(csvfile)
                        if row_type == self.type)
Example #7
0
 def __init__(self,
              data_split_file_path,
              transforms,
              augments=None,
              **kwargs):
     """Build (t1, t1ce, t2, flair, seg) path tuples per patient.

     Args:
         data_split_file_path (str): Csv with `type` and `path` columns.
         transforms: Transform ops.
         augments: Optional augmentation ops.
     """
     super().__init__(**kwargs)
     split_table = pd.read_csv(data_split_file_path)
     records = []
     for raw_path in split_table[split_table.type == self.type].path:
         patient_dir = Path(raw_path)
         name = patient_dir.name
         if self.type == 'train':
             seg_path = patient_dir / f'{name}_seg.nii.gz'
         else:
             # Non-training segmentations come from the unresampled tree.
             original_dir = Path(patient_dir.as_posix().replace(
                 'brats17_resampled', 'brats17'))
             seg_path = original_dir / f'{name}_seg.nii.gz'
         records.append((patient_dir / f'{name}_t1.nii.gz',
                         patient_dir / f'{name}_t1ce.nii.gz',
                         patient_dir / f'{name}_t2.nii.gz',
                         patient_dir / f'{name}_flair.nii.gz',
                         seg_path))
     self.data_paths = tuple(records)
     self.transforms = Compose.compose(transforms)
     self.augments = Compose.compose(augments)
     self.to_tensor = ToTensor()
Example #8
0
 def __init__(self,
              data_split_file_path,
              transforms,
              augments=None,
              **kwargs):
     """Build (image, label) path pairs per patient.

     Args:
         data_split_file_path (str): Csv with `type` and `path` columns.
         transforms: Transform ops.
         augments: Optional augmentation ops.
     """
     super().__init__(**kwargs)
     split_table = pd.read_csv(data_split_file_path)
     self.csv_name = Path(data_split_file_path).name
     pairs = []
     for raw_path in split_table[split_table.type == self.type].path:
         patient_dir = Path(raw_path)
         name = patient_dir.name
         image_path = patient_dir / f'{name}_img.nii.gz'
         if self.csv_name == 'testing.csv':
             # The testing csv has no labels: pair the image with itself.
             label_path = patient_dir / f'{name}_img.nii.gz'
         elif self.type == 'train':
             label_path = patient_dir / f'{name}_label.nii.gz'
         else:
             # Non-training labels come from the unresampled tree.
             label_path = Path(patient_dir.as_posix().replace(
                 'vipcup_resampled', 'vipcup')) / f'{name}_label.nii.gz'
         pairs.append((image_path, label_path))
     self.data_paths = tuple(pairs)
     self.transforms = Compose.compose(transforms)
     self.augments = Compose.compose(augments)
     self.to_tensor = ToTensor()
Example #9
0
    def __init__(self, data_dir, train_data_csv, valid_data_csv, transforms=None, resize=None, augments=None, **kwargs):
        """Collect (image path, class index) pairs from the split csv.

        Args:
            data_dir: Root directory containing the image files.
            train_data_csv (str): Csv of training rows (image name, label).
            valid_data_csv (str): Csv of validation rows (image name, label).
            transforms: Unused; kept for interface compatibility.
            resize: Unused; kept for interface compatibility.
            augments: Optional augmentation ops (via Compose.compose).

        Raises:
            ValueError: If self.type is neither 'train' nor 'valid'.
        """
        super().__init__(**kwargs)
        # Map the csv's string labels onto contiguous class indices.
        class_type = {'A': 0, 'B': 1, 'C': 2}

        if self.type == 'train':
            csv_path = train_data_csv
        elif self.type == 'valid':
            csv_path = valid_data_csv
        else:
            # ValueError (a subclass of the previously raised bare Exception)
            # is more specific while staying backward compatible for callers.
            raise ValueError('The type of dataset is undefined!')

        self.data_paths = []
        with open(csv_path, "r") as f:
            rows = csv.reader(f)
            next(rows, None)  # Skip the header row.
            for num_img, label in rows:
                img_path = str(Path(data_dir) / Path(num_img))
                self.data_paths.append((img_path, class_type[label]))

        # Both splits share the same ImageNet-normalization pipeline; build it
        # once per attribute so the two attributes stay independent objects.
        def _normalize_pipeline():
            return torchTransform.Compose([
                torchTransform.ToTensor(),
                torchTransform.Normalize(mean=[0.485, 0.456, 0.406],
                                         std=[0.229, 0.224, 0.225]),
            ])

        self.transform_train = _normalize_pipeline()
        self.transform_valid = _normalize_pipeline()
        self.augments = Compose.compose(augments)