Example #1
    def __init__(self,
                 data_split_csv,
                 train_preprocessings,
                 valid_preprocessings,
                 transforms,
                 augments=None,
                 **kwargs):
        super().__init__(**kwargs)
        self.data_split_csv = data_split_csv
        self.train_preprocessings = compose(train_preprocessings)
        self.valid_preprocessings = compose(valid_preprocessings)
        self.transforms = compose(transforms)
        self.augments = compose(augments)

        self.data_paths = []
        self.input_paths = []

        # Collect the data paths according to the dataset split csv.
        with open(self.data_split_csv, "r") as f:
            type_ = 'train' if self.type == 'train' else 'valid'
            rows = csv.reader(f)
            for case_name, split_type in rows:
                if split_type == type_:
                    label_paths = sorted(
                        list((self.data_dir /
                              case_name).glob('clf_label*.npy')))
                    graph_paths = sorted(
                        list((self.data_dir / case_name).glob('graph.bin')))

                    self.data_paths.extend([(graph_path, label_path)
                                            for graph_path, label_path in zip(
                                                graph_paths, label_paths)])
Example #2
    def __init__(self, downscale_factor, transforms, augments=None, num_frames=5, temporal_order='last', **kwargs):
        super().__init__(**kwargs)
        if downscale_factor not in [2, 3, 4]:
            raise ValueError(f'The downscale factor should be 2, 3, or 4. Got {downscale_factor}.')
        self.downscale_factor = downscale_factor

        self.transforms = compose(transforms)
        self.augments = compose(augments)        
        self.num_frames = num_frames
        
        if temporal_order not in ['last', 'middle']:
            raise ValueError(f"The temporal order should be 'last' or 'middle'. Got {temporal_order}.")
        self.temporal_order = temporal_order        

        # For training, save the data paths along with the target frame index;
        # for validation, only the data paths are saved so that sequences of varying length can be processed.
        lr_paths = sorted((self.data_dir / self.type / 'LR' / f'X{downscale_factor}').glob('**/*2d+1d*.nii.gz'))
        hr_paths = sorted((self.data_dir / self.type / 'HR').glob('**/*2d+1d*.nii.gz'))
        if self.type == 'train':
            self.data = []
            for lr_path, hr_path in zip(lr_paths, hr_paths):
                T = nib.load(str(lr_path)).header.get_data_shape()[-1]
                self.data.extend([(lr_path, hr_path, t) for t in range(T)])
        else:
            self.data = [(lr_path, hr_path) for lr_path, hr_path in zip(lr_paths, hr_paths)]
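
The snippet above only stores paths and the target frame index. As a purely hypothetical illustration (not taken from the original class), the stored `num_frames` and `temporal_order` settings could be used in `__getitem__` roughly like this to cut a fixed-length temporal window around a target frame `t`:

import nibabel as nib
import numpy as np

# Hypothetical helper: extract `num_frames` consecutive frames from the last
# axis of a NIfTI volume so that frame `t` is either the last or the middle
# frame of the window, clipping indices at the sequence boundaries.
def extract_window(path, t, num_frames, temporal_order):
    volume = nib.load(str(path)).get_fdata()
    total_frames = volume.shape[-1]
    if temporal_order == 'last':
        start = t - num_frames + 1
    else:  # 'middle'
        start = t - num_frames // 2
    indices = np.clip(np.arange(start, start + num_frames), 0, total_frames - 1)
    return np.take(volume, indices, axis=-1)
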
Example #3
    def __init__(self,
                 data_split_csv,
                 re_sample_size,
                 train_preprocessings,
                 valid_preprocessings,
                 transforms,
                 augments=None,
                 **kwargs):
        super().__init__(**kwargs)
        self.data_split_csv = data_split_csv
        self.npoints = re_sample_size
        self.train_preprocessings = compose(train_preprocessings)
        self.valid_preprocessings = compose(valid_preprocessings)
        self.transforms = compose(transforms)
        self.augments = compose(augments)
        self.data_paths = []

        # Collect the data paths according to the dataset split csv.
        with open(self.data_split_csv, "r") as f:
            type_ = 'Training' if self.type == 'train' else 'Validation'
            rows = csv.reader(f)
            for file_name, split_type in rows:
                if split_type == type_:
                    data_path = self.data_dir / f'{file_name}'
                    self.data_paths.append(data_path)
Example #4
    def __init__(self, data_dir, data_split_csv, preprocessings, transforms,
                 sample_size, shift, device, net, metric_fns, saved_dir=None,
                 exported=None):
        self.data_dir = data_dir
        self.data_split_csv = data_split_csv
        self.preprocessings = compose(preprocessings)
        self.transforms = compose(transforms)
        self.sample_size = sample_size
        self.shift = shift
        self.device = device
        self.net = net
        self.metric_fns = metric_fns
        self.saved_dir = saved_dir
        self.exported = exported
        self.log = self._init_log()
Example #5
    def __init__(self,
                 data_split_csv,
                 train_preprocessings,
                 valid_preprocessings,
                 transforms,
                 augments=None,
                 **kwargs):
        super().__init__(**kwargs)
        self.data_split_csv = data_split_csv
        self.train_preprocessings = compose(train_preprocessings)
        self.valid_preprocessings = compose(valid_preprocessings)
        self.transforms = compose(transforms)
        self.augments = compose(augments)

        self.data_paths = []
        self.input_paths = []

        # Collect the data paths according to the dataset split csv.
        with open(self.data_split_csv, "r") as f:
            type_ = 'train' if self.type == 'train' else 'valid'
            rows = csv.reader(f)
            for case_name, split_type in rows:
                if split_type == type_:
                    image_paths = sorted(
                        list((self.data_dir / case_name).glob('image*.npy')))
                    label_paths = sorted(
                        list((self.data_dir / case_name).glob('label*.npy')))
                    gcn_label_paths = sorted(
                        list((self.data_dir /
                              case_name).glob('gcn_label*.npy')))
                    segments_paths = sorted(
                        list(
                            (self.data_dir / case_name).glob('segments*.npy')))
                    features_paths = sorted(
                        list(
                            (self.data_dir / case_name).glob('features*.npy')))
                    adj_paths = sorted(
                        list((self.data_dir / case_name).glob('adj_arr*.npy')))

                    self.data_paths.extend([
                        (image_path, label_path, gcn_label_path)
                        for image_path, label_path, gcn_label_path in zip(
                            image_paths, label_paths, gcn_label_paths)
                    ])
                    self.input_paths.extend([
                        (s_path, f_path, a_path)
                        for s_path, f_path, a_path in zip(
                            segments_paths, features_paths, adj_paths)
                    ])
Example #6
    def __init__(self, downscale_factor, transforms, augments=None, **kwargs):
        super().__init__(**kwargs)
        if downscale_factor not in [2, 3, 4]:
            raise ValueError(
                f'The downscale factor should be 2, 3, or 4. Got {downscale_factor}.'
            )
        self.downscale_factor = downscale_factor

        self.transforms = compose(transforms)
        self.augments = compose(augments)

        lr_paths = sorted((self.data_dir / self.type / 'LR' /
                           f'X{downscale_factor}').glob('**/*2d*.nii.gz'))
        hr_paths = sorted(
            (self.data_dir / self.type / 'HR').glob('**/*2d*.nii.gz'))
        self.data = [(lr_path, hr_path)
                     for lr_path, hr_path in zip(lr_paths, hr_paths)]
Example #7
    def __init__(self, data_split_csv, positive_sampling_rate, sample_size,
                 preprocessings, augments, transforms, **kwargs):
        super().__init__(**kwargs)
        self.data_split_csv = data_split_csv
        self.positive_sampling_rate = positive_sampling_rate
        self.sample_size = sample_size
        self.preprocessings = compose(preprocessings)
        self.augments = compose(augments)
        self.transforms = compose(transforms)
        self.data_paths = []

        # Collect the data paths according to the dataset split csv.
        with open(self.data_split_csv, "r") as f:
            type_ = 'Training' if self.type == 'train' else 'Validation'
            rows = csv.reader(f)
            for case_name, split_type in rows:
                if split_type == type_:
                    image_path = self.data_dir / f'{case_name[:5]}-{case_name[5:]}image.nii.gz'
                    label_path = self.data_dir / f'{case_name[:5]}-{case_name[5:]}label_GTV1.nii.gz'
                    self.data_paths.append([image_path, label_path])
Example #8
def test_composed_transforms(config, dummy_input):
    """Test to compose multiple augmentations
    including RandomCrop, Normalize, ToTensor.
    """
    cfg = config
    transforms = compose(cfg.dataset.transforms)

    # H, W, C
    image, label = dummy_input(image_size=(512, 512, 3),
                               label_size=(512, 512, 1))
    _image, _label = transforms(image,
                                label,
                                dtypes=[torch.float, torch.long],
                                elastic_deformation_orders=[3, 0])
    assert _image.dtype == torch.float
    assert _image.size() == (256, 256, image.shape[2])
    assert _label.dtype == torch.long
    assert _label.size() == (256, 256, label.shape[2])

    # Test feeding only image
    _image = transforms(image, dtypes=[torch.float])
    assert _image.dtype == torch.float
    assert _image.size() == (256, 256, image.shape[2])
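
The test relies on a `dummy_input` fixture that is not shown in this listing. A minimal sketch of such a fixture, assuming it simply returns random NumPy arrays of the requested (H, W, C) shapes, could look like the following; the fixture in the original project may be defined differently:

import numpy as np
import pytest


@pytest.fixture
def dummy_input():
    # Factory returning a random float image and a random integer label map
    # with the requested shapes.
    def _make(image_size, label_size):
        image = np.random.uniform(size=image_size).astype(np.float32)
        label = np.random.randint(0, 2, size=label_size).astype(np.int64)
        return image, label
    return _make
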
Example #9
    def __init__(self, image, label, transforms, **kwargs):
        super().__init__(**kwargs)
        self.image, self.label = image, label
        self.transforms = compose(transforms)
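
Every example in this listing passes its configured transforms and augmentations through a `compose` helper whose definition is not shown. As a rough orientation only, a minimal sketch of such a helper, consistent with how it is called above (accepting `None`, chaining callables, and forwarding keyword arguments such as `dtypes`), might look like this; the helper in the original codebase presumably also instantiates the transform objects from a config, which is omitted here:

def compose(transforms=None):
    # Chain a sequence of transform callables into a single callable.
    # With no transforms, return an identity function so `augments=None` works.
    if transforms is None:
        return lambda *inputs, **kwargs: inputs if len(inputs) > 1 else inputs[0]

    def _composed(*inputs, **kwargs):
        for transform in transforms:
            outputs = transform(*inputs, **kwargs)
            inputs = outputs if isinstance(outputs, tuple) else (outputs,)
        return inputs if len(inputs) > 1 else inputs[0]

    return _composed

Note that in this sketch every transform receives all keyword arguments, whereas the original helper presumably routes arguments such as `dtypes` or `elastic_deformation_orders` only to the transforms that need them.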