def get_batch_generator(self, batch_size=1):
        """Build an ordered (non-shuffled) batch generator for prediction.

        Data source priority: an in-memory array (``self.data``) first, else a
        subject loaded from disk (``self.subject``); raises ``ValueError`` if
        neither is set.

        Args:
            batch_size (int): number of elements per yielded batch.

        Returns:
            The (possibly augmented) ordered batch generator.

        Raises:
            ValueError: if neither ``self.data`` nor ``self.subject`` is set.
        """
        if self.data is not None:
            exp_utils.print_verbose(self.Config, "Loading data from PREDICT_IMG input file")
            data = np.nan_to_num(self.data)
            # Use dummy mask in case we only want to predict on some data (where we do not have Ground Truth))
            # NOTE(review): INPUT_DIM[0] is used for all three spatial axes —
            # presumably volumes are cubic; confirm for anisotropic inputs.
            seg = np.zeros((self.Config.INPUT_DIM[0], self.Config.INPUT_DIM[0],
                            self.Config.INPUT_DIM[0], self.Config.NR_OF_CLASSES)).astype(self.Config.LABELS_TYPE)
        elif self.subject is not None:
            if self.Config.TYPE == "combined":
                # Load from Npy file for Fusion
                data = np.load(join(C.DATA_PATH, self.Config.DATASET_FOLDER, self.subject,
                                    self.Config.FEATURES_FILENAME + ".npy"), mmap_mode="r")
                seg = np.load(join(C.DATA_PATH, self.Config.DATASET_FOLDER, self.subject,
                                   self.Config.LABELS_FILENAME + ".npy"), mmap_mode="r")
                data = np.nan_to_num(data)
                seg = np.nan_to_num(seg)
                # Collapse the last two axes of the 5D features into a single
                # channel axis -> (x, y, z, channels).
                data = np.reshape(data, (data.shape[0], data.shape[1], data.shape[2], data.shape[3] * data.shape[4]))
            else:
                # Local import to avoid a circular dependency with the training loader.
                from tractseg.data.data_loader_training import load_training_data
                data, seg = load_training_data(self.Config, self.subject)
        else:
            raise ValueError("Neither 'data' nor 'subject' set.")

        # 2D vs 3D generators share the same (data, seg) tuple interface.
        if self.Config.DIM == "2D":
            batch_gen = BatchGenerator2D_data_ordered_standalone((data, seg), batch_size=batch_size)
        else:
            batch_gen = BatchGenerator3D_data_ordered_standalone((data, seg), batch_size=batch_size)
        batch_gen.Config = self.Config

        # NOTE(review): `type` here is the Python *builtin* (this method has no
        # `type` parameter), so `_augment_data` receives the builtin class —
        # it can never compare equal to a string like "train", so augmentation
        # is effectively disabled on this path. Confirm this is intentional.
        batch_gen = self._augment_data(batch_gen, type=type)
        return batch_gen
# ---- Example #2 (score: 0) — scraped-example separator, commented so the file parses ----
    def generate_train_batch(self):
        """Sample a random batch of subjects and return padded volumes.

        Returns a dict with keys "data" and "seg", each an ndarray of shape
        (batch_size, channels, x, y, [z]) cast to float32, padded so the
        spatial dimensions are divisible by 8 (needed for the Schizo dataset).
        """
        subjects = self._data[0]
        # Draw batch_size distinct subjects (sampling without replacement).
        chosen = np.random.choice(len(subjects), self.batch_size, False, None)

        volumes = []
        labels = []
        for idx in chosen:
            img, lab = load_training_data(self.Config, subjects[idx])  # (x, y, z, channels)
            # channels have to be first
            volumes.append(img.transpose(3, 0, 1, 2))
            labels.append(lab.transpose(3, 0, 1, 2))

        x = np.array(volumes)
        y = np.array(labels)

        # Zero-pad so every spatial dimension is divisible by 8
        # (required for the Schizo dataset).
        x = pad_nd_image(x, shape_must_be_divisible_by=(8, 8), mode='constant',
                         kwargs={'constant_values': 0})
        y = pad_nd_image(y, shape_must_be_divisible_by=(8, 8), mode='constant',
                         kwargs={'constant_values': 0})

        x = x.astype(np.float32)
        y = y.astype(np.float32)

        return {
            "data": x,  # (batch_size, channels, x, y, [z])
            "seg": y,   # (batch_size, channels, x, y, [z])
        }
    def generate_train_batch(self):
        """Sample a random batch of subjects, cropping each to Config.INPUT_DIM.

        Returns a dict with keys "data" and "seg", each an ndarray of shape
        (batch_size, channels, x, y, [z]) cast to float32.
        """
        subjects = self._data[0]
        # Draw batch_size distinct subjects (sampling without replacement).
        chosen = np.random.choice(len(subjects), self.batch_size, False, None)

        volumes = []
        labels = []
        for idx in chosen:
            img, lab = load_training_data(self.Config, subjects[idx])  # (x, y, z, channels)
            img = img.transpose(3, 0, 1, 2)  # channels have to be first
            lab = lab.transpose(3, 0, 1, 2)

            # Crop each subject individually (instead of the whole batch at
            # once) so that every element of the batch has identical dims.
            img, lab = crop(img[None, ...], lab[None, ...],
                            crop_size=self.Config.INPUT_DIM)
            volumes.append(img.squeeze(axis=0))
            labels.append(lab.squeeze(axis=0))

        x = np.array(volumes).astype(np.float32)
        y = np.array(labels).astype(np.float32)

        return {
            "data": x,  # (batch_size, channels, x, y, [z])
            "seg": y,   # (batch_size, channels, x, y, [z])
        }
# ---- Example #4 (score: 0) — scraped-example separator, commented so the file parses ----
    def generate_train_batch(self):
        """Sample a random batch of full subject volumes (no crop, no pad).

        Returns a dict with keys "data" and "seg", each an ndarray of shape
        (batch_size, channels, x, y, [z]); dtypes are whatever
        load_training_data produced (no float32 cast here).
        """
        subjects = self._data[0]
        # Draw batch_size distinct subjects (sampling without replacement).
        chosen = np.random.choice(len(subjects), self.batch_size, False, None)

        volumes = []
        labels = []
        for idx in chosen:
            img, lab = load_training_data(self.Config, subjects[idx])  # (x, y, z, channels)
            # channels have to be first
            volumes.append(img.transpose(3, 0, 1, 2))
            labels.append(lab.transpose(3, 0, 1, 2))

        return {
            "data": np.array(volumes),  # (batch_size, channels, x, y, [z])
            "seg": np.array(labels),    # (batch_size, channels, x, y, [z])
        }