Example #1
import os

import joblib
import nibabel as nib
import numpy as np

# PointExamples is the project's example loader; its import path depends on
# the package layout and is not shown in this snippet.


def make_train_set(dwi_file=None,
                   trk_file=None,
                   save_path=None,
                   block_size=3,
                   samples_percent=1.0,
                   n_samples=None,
                   min_fiber_length=0,
                   n_incoming=1):
    """Save training set as pickle"""

    if samples_percent < 1 and n_samples is not None:
        raise RuntimeError("n_samples must be None if samples_percent < 1.")

    # The labels are the real vectors.
    label_type = "point"

    example_loader = PointExamples(nii_file=dwi_file,
                                   trk_file=trk_file,
                                   block_size=block_size,
                                   example_percent=samples_percent,
                                   num_eval_examples=0,
                                   min_fiber_length=min_fiber_length,
                                   last_incoming=n_incoming)

    X = {
        'blocks': [],
        'incoming': [],
        'centers': [],
    }
    y = []

    if n_samples is None:
        n_samples = len(example_loader.train_labels)

    nii_aff = example_loader.brain_file.affine
    trk_aff = nib.trackvis.aff_from_hdr(example_loader.fiber_header)

    assert np.allclose(nii_aff, trk_aff)

    for idx, label in enumerate(example_loader.train_labels):
        if idx >= n_samples:
            break
        block = PointExamples.build_datablock(example_loader.brain_data,
                                              example_loader.block_size,
                                              label['center'],
                                              label['incoming'],
                                              label['outgoing'], label_type,
                                              nii_aff)
        X['blocks'].append(block['data_block'])
        X['incoming'].append(block['incoming'])
        X['centers'].append(block['center'])
        y.append(block['outgoing'])

    for key in X.keys():
        X[key] = np.array(X[key])
    y = np.array(y)

    joblib.dump(X, os.path.join(save_path, "train_X.pkl"))
    joblib.dump(y, os.path.join(save_path, "train_y.pkl"))
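A minimal usage sketch for make_train_set above; the data paths and output directory are placeholders, not files shipped with this snippet:

import os
import joblib

# Hypothetical inputs and output directory.
os.makedirs("output", exist_ok=True)
make_train_set(dwi_file="data/dwi_train.nii.gz",
               trk_file="data/fibers.trk",
               save_path="output",
               block_size=3,
               n_incoming=1)

# make_train_set writes two pickles that joblib can load back.
X = joblib.load(os.path.join("output", "train_X.pkl"))
y = joblib.load(os.path.join("output", "train_y.pkl"))
print(X["blocks"].shape, X["incoming"].shape, y.shape)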
Example #2
    @classmethod
    def setUpClass(cls):
        TRK = "tests/data/fibers.trk"
        NII = "tests/data/dwi_train.nii.gz"
        cls.loader = PointExamples(
            NII,
            TRK,
            block_size=3,
            n_incoming=3,
            num_eval_examples=0,
            example_percent=0.25,
        )

        cls.generator = cls.loader.get_generator()()
        cls.example = next(cls.generator)
Example #3
    def _build_next_X(self, brain_data, ongoing_fibers, affine):
        """Builds the next X-batch to be fed to the model.

        The X-batch continues each streamline based on the outgoing directions
        obtained at the previous step.

        Args:
            brain_data: Diffusion data array the blocks are cut from.
            ongoing_fibers: List of fibers; each fiber is a list of 3D points
                whose last entry is the current streamline head.
            affine: Voxel-to-world affine of the diffusion volume.

        Returns:
            next_X: The next batch of point values (blocks, incoming).
        """
        label_type = "point"
        X = {'incoming': [], 'blocks': []}

        for fiber in ongoing_fibers:
            center_point = fiber[-1]
            incoming_point = np.zeros((self.input_fn_config["n_incoming"], 3))
            outgoing = np.zeros(3)
            for i in range(
                    min(self.input_fn_config["n_incoming"],
                        len(fiber) - 1)):
                incoming_point[i] = fiber[-i - 2]
            sample = PointExamples.build_datablock(
                brain_data, self.input_fn_config["block_size"], center_point,
                incoming_point, outgoing, label_type, affine)
            X_sample = {
                'incoming': sample['incoming'].reshape(-1, 3),
                'blocks': sample['data_block']
            }
            # Add example to examples by appending individual lists
            for key, cur_list in X.items():
                cur_list.append(X_sample[key])

        for key in X:
            X[key] = np.array(X[key])

        return X
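To make the incoming-point padding above concrete, here is a small standalone sketch of the same backward walk (a toy fiber, not repository data):

import numpy as np

# Toy fiber: a list of 3D points whose last entry is the current streamline head.
fiber = [np.array([0.0, 0.0, 0.0]),
         np.array([1.0, 0.0, 0.0]),
         np.array([2.0, 0.0, 0.0])]
n_incoming = 3

center_point = fiber[-1]
incoming_point = np.zeros((n_incoming, 3))
# Walk backwards from the point just before the head; rows for which the
# fiber is too short stay zero, as in _build_next_X above.
for i in range(min(n_incoming, len(fiber) - 1)):
    incoming_point[i] = fiber[-i - 2]

# center_point   -> [2. 0. 0.]
# incoming_point -> [[1. 0. 0.], [0. 0. 0.], [0. 0. 0.]]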
Example #4
def tracking_input(nii_file,
                   trk_file,
                   block_size,
                   n_incoming,
                   batch_size,
                   num_epochs,
                   shuffle=True,
                   buffer_size=10000,
                   min_fiber_length=0,
                   every_n_fibers=None,
                   load_only_n_samples=False):

    examples = PointExamples(nii_file,
                             trk_file,
                             block_size,
                             n_incoming=n_incoming,
                             num_eval_examples=0,
                             min_fiber_length=min_fiber_length,
                             every_n_fibers=every_n_fibers,
                             load_only_n_samples=load_only_n_samples)

    generator = examples.get_generator()

    block_shape = [block_size] * 3 + [examples.brain_data[0].shape[-1]]
    incoming_shape = [n_incoming, 3]

    def input_fn():
        dataset = tf.data.Dataset.from_generator(generator,
            ({"blocks": tf.float32, "incoming": tf.float32}, tf.float32),
            ({"blocks": tf.TensorShape(block_shape),
              "incoming": tf.TensorShape(incoming_shape)},
             tf.TensorShape([3])))
        dataset = dataset.batch(batch_size)
        # Note: shuffle() is applied after batch(), so whole batches are
        # shuffled rather than individual examples.
        if shuffle:
            dataset = dataset.shuffle(buffer_size=buffer_size)
        dataset = dataset.repeat(num_epochs)
        iterator = dataset.make_one_shot_iterator()
        return iterator.get_next()

    feature_spec = {}
    feature_spec["blocks"] = np_placeholder(np.ones(tuple(block_shape), dtype=np.float32))
    feature_spec["incoming"] = np_placeholder(np.ones(tuple(incoming_shape), dtype=np.float32))
        
    train_size = min(examples.n_labels) * len(examples.brain_data)

    return input_fn, feature_spec, train_size
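A rough sketch of wiring the returned input_fn into a TF 1.x Estimator; my_model_fn and the file paths are placeholders, not names from this snippet:

import tensorflow as tf

input_fn, feature_spec, train_size = tracking_input(
    nii_file="data/dwi_train.nii.gz",   # placeholder path
    trk_file="data/fibers.trk",         # placeholder path
    block_size=3,
    n_incoming=1,
    batch_size=32,
    num_epochs=1)

# my_model_fn is a placeholder for the project's model function.
estimator = tf.estimator.Estimator(model_fn=my_model_fn)
# With num_epochs=1 the dataset is exhausted after one pass, so train()
# stops on its own.
estimator.train(input_fn=input_fn)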
Example #5
    @classmethod
    def setUpClass(cls):
        TRK = "data/iFOD2_skip100.trk"
        NII = "data/FODl4.nii.gz"
        PATH = "tests"
        cls.loader = PointExamples(
            NII,
            TRK,
            block_size=3,
            num_eval_examples=0,
            example_percent=0.25,
        )
Example #6
    def transform(self, X):
        """Load training labels, affine, and trackvis header from a trk file."""

        if isinstance(X, list):
            X = X[0]

        examples = PointExamples(trk_file=X, n_incoming=self.n_incoming)

        return {
            "labels": examples.train_labels,
            "affine": examples.affine,
            "fiber_header": examples.fiber_header
        }
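A hypothetical call to the transform method above; the class it belongs to is not shown here, so transformer stands in for an instance of it and the trk path is a placeholder:

# transformer: instance of the class that defines transform().
result = transformer.transform(["data/fibers.trk"])
labels = result["labels"]            # per-point training labels
affine = result["affine"]            # affine of the source volume
header = result["fiber_header"]      # trackvis header of the input file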
Example #7
class TestPointExamples(unittest.TestCase):
    """Test some functionalities of the example loader."""
    @classmethod
    def setUpClass(cls):
        TRK = "tests/data/fibers.trk"
        NII = "tests/data/dwi_train.nii.gz"
        cls.loader = PointExamples(
            NII,
            TRK,
            block_size=3,
            n_incoming=3,
            num_eval_examples=0,
            example_percent=0.25,
        )

        cls.generator = cls.loader.get_generator()()
        cls.example = next(cls.generator)

    @classmethod
    def tearDownClass(cls):
        pass

    def test_generator(self):
        self.assertIsInstance(self.generator, types.GeneratorType)

    def test_example(self):
        self.assertIsInstance(self.example, tuple)
        self.assertEqual(len(self.example), 2)

    def test_features(self):
        features = self.example[0]
        self.assertIsInstance(features, dict)
        self.assertEqual(len(features), 2)

        self.assertTrue("blocks" in features)
        self.assertTrue("incoming" in features)

        self.assertEqual(features["blocks"].shape, (3, 3, 3, 15))
        self.assertEqual(features["incoming"].shape, (3, 3))

    def test_target(self):
        target = self.example[1]
        self.assertIsInstance(target, np.ndarray)
        self.assertEqual(target.shape, (3,))
Example #8
    def fvm_scalars(self,
                    trk_file,
                    nii_file,
                    min_pts_per_fiber=2,
                    every_n_fibers=1):
        """Produce trk file marked with concentration and fvm_probab.

        fvm_scalars produces concentration and fvm_probab scalars that
        should be passed on to utils.save_fibers

        Args:
            trk_file (str): Path to trk file that contains the fibers to be marked.
            nii_file (str): Path to nifti file with corresponding diffusion data.

        Returns:
            scalars (dict): Dict to lists of shape (n_tracks, ). Currently, there
                            are only two keys: "concentration" and "fvm_probab".
            all_tracks (list): List of unmarked tracks of shape (n_tracks, ).
            trk_hdr: Header of trk_file.

        TODO:
            * Add some kind of skip parameter to reduce computation time.
            * Add min_length parameter.
            * Make fvm_scalars useable with trained model and not only during
              training.
        """
        tracks, trk_hdr = nib.trackvis.read(trk_file, points_space="voxel")
        trk_aff = nib.trackvis.aff_from_hdr(trk_hdr)
        all_tracks = []
        for i, track in enumerate(tracks):
            if len(track[0]) >= min_pts_per_fiber and i % every_n_fibers == 0:
                all_tracks.append(track[0])

        tracks = all_tracks[:]

        nii_file = nib.load(nii_file)
        nii_data = nii_file.get_data()
        nii_hdr = nii_file.header
        nii_aff = nii_file.affine

        assert np.allclose(nii_aff, trk_aff)

        block_size = self.input_fn_config["block_size"]
        n_incoming = self.input_fn_config["n_incoming"]
        n_tracks = len(all_tracks)

        predictor = self.predictor()

        ongoing_idx = list(range(n_tracks))
        track_lengths = list(map(lambda track: len(track), tracks))
        scalars = {
            "concentration": [[] for _ in range(n_tracks)],
            "fvm_probab": [[] for _ in range(n_tracks)],
            "angles": [[] for _ in range(n_tracks)],
            "inverted": [[] for _ in range(n_tracks)],
            "probab": []
        }

        cprint("Marking {} fibers...".format(n_tracks),
               "green",
               "on_grey",
               flush=True)
        while len(ongoing_idx) > 0:
            ongoing_tracks = [tracks[i] for i in ongoing_idx]
            n_ongoing = len(ongoing_tracks)
            batch = self._build_next_X(nii_data, ongoing_tracks, nii_aff)
            predictions = predictor(batch)

            for i, kappa in enumerate(predictions["concentration"]):
                scalars["concentration"][ongoing_idx[i]].append(kappa[0])

            for i in range(n_ongoing):
                k = predictions["concentration"][i][0]
                mu = self.apply_affine(predictions["mean"][i], nii_aff)

                ongoing_len = len(ongoing_tracks[i])
                assert ongoing_len >= 1

                if ongoing_len == len(all_tracks[ongoing_idx[i]]):
                    v = PointExamples.points_to_relative(
                        all_tracks[ongoing_idx[i]][-2],
                        all_tracks[ongoing_idx[i]][-1])
                else:
                    v = PointExamples.points_to_relative(
                        all_tracks[ongoing_idx[i]][ongoing_len - 1],
                        all_tracks[ongoing_idx[i]][ongoing_len])

                v = self.apply_affine(v, nii_aff)
                inner_prod = np.clip(np.inner(mu, v), -1.0, 1.0)

                mu = np.sign(inner_prod) * mu
                scalars["inverted"][ongoing_idx[i]].append(
                    1 if inner_prod < 0 else 0)

                fvm_probab = np.log(self.fvm_probab(v, mu, k) + 10**-12)
                scalars["fvm_probab"][ongoing_idx[i]].append(fvm_probab)

                angle = map_to_90deg_range(np.rad2deg(np.arccos(inner_prod)))
                scalars["angles"][ongoing_idx[i]].append(angle)

            tracks = list(map(lambda x: x[:-1], tracks))
            ongoing_idx = list(
                filter(lambda i: len(tracks[i]) > 0, ongoing_idx))

            cprint("{:6d} / {} fibers going on.".format(
                len(ongoing_idx), n_tracks),
                   "red",
                   "on_grey",
                   end="\r",
                   flush=True)

        for i, fvm_probab in enumerate(scalars["fvm_probab"]):
            probab = np.sum(fvm_probab)
            scalars["probab"].append([probab] * track_lengths[i])

        self.assert_equal_length(all_tracks, scalars)

        return scalars, all_tracks, trk_hdr
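self.fvm_probab is not shown in this snippet. For reference, a helper evaluating the standard 3D von Mises-Fisher density, which is presumably what it computes (an assumption, not the project's actual implementation), could look like this:

import numpy as np

def vmf_density(x, mu, kappa):
    """Von Mises-Fisher density on the unit sphere in R^3.

    x and mu are unit 3-vectors, kappa >= 0 is the concentration.
    The normalizer is C_3(kappa) = kappa / (4 * pi * sinh(kappa)).
    """
    if kappa < 1e-8:
        return 1.0 / (4.0 * np.pi)  # kappa -> 0 gives the uniform density
    # For very large kappa, sinh(kappa) overflows; a log-space variant
    # would be needed in that regime.
    norm = kappa / (4.0 * np.pi * np.sinh(kappa))
    return norm * np.exp(kappa * np.dot(mu, x))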