Example #1
def test_cascade_of_optimizations():

    cingulum_bundles = two_cingulum_bundles()

    cb1 = cingulum_bundles[0]
    cb1 = set_number_of_points(cb1, 20)

    test_x0 = np.array([10, 4, 3, 0, 20, 10, 1.5, 1.5, 1.5, 0., 0.2, 0])

    cb2 = transform_streamlines(cingulum_bundles[0],
                                compose_matrix44(test_x0))
    cb2 = set_number_of_points(cb2, 20)

    print('first rigid')
    slr = StreamlineLinearRegistration(x0=6)
    slm = slr.optimize(cb1, cb2)

    print('then similarity')
    slr2 = StreamlineLinearRegistration(x0=7)
    slm2 = slr2.optimize(cb1, cb2, slm.matrix)

    print('then affine')
    slr3 = StreamlineLinearRegistration(x0=12, options={'maxiter': 50})
    slm3 = slr3.optimize(cb1, cb2, slm2.matrix)

    assert_(slm2.fopt < slm.fopt)
    assert_(slm3.fopt < slm2.fopt)
Example #2
    def _register_neighb_to_model(self, model_bundle, neighb_streamlines,
                                  metric=None, x0=None, bounds=None,
                                  select_model=400, select_target=600,
                                  method='L-BFGS-B',
                                  nb_pts=20, num_threads=None):

        if self.verbose:
            print('# Local SLR of neighb_streamlines to model')
            t = time()

        if metric is None or metric == 'symmetric':
            metric = BundleMinDistanceMetric(num_threads=num_threads)
        if metric == 'asymmetric':
            metric = BundleMinDistanceAsymmetricMetric()
        if metric == 'diagonal':
            metric = BundleSumDistanceMatrixMetric()

        if x0 is None:
            x0 = 'similarity'

        if bounds is None:
            bounds = [(-30, 30), (-30, 30), (-30, 30),
                      (-45, 45), (-45, 45), (-45, 45), (0.8, 1.2)]

        # TODO: this can be sped up by using the centroids directly
        static = select_random_set_of_streamlines(model_bundle,
                                                  select_model, rng=self.rng)
        moving = select_random_set_of_streamlines(neighb_streamlines,
                                                  select_target, rng=self.rng)

        static = set_number_of_points(static, nb_pts)
        moving = set_number_of_points(moving, nb_pts)

        slr = StreamlineLinearRegistration(metric=metric, x0=x0,
                                           bounds=bounds,
                                           method=method)
        slm = slr.optimize(static, moving)

        transf_streamlines = neighb_streamlines.copy()
        transf_streamlines._data = apply_affine(
            slm.matrix, transf_streamlines._data)

        transf_matrix = slm.matrix
        slr_bmd = slm.fopt
        slr_iterations = slm.iterations

        if self.verbose:
            print(' Square-root of BMD is %.3f' % (np.sqrt(slr_bmd),))
            if slr_iterations is not None:
                print(' Number of iterations %d' % (slr_iterations,))
            print(' Matrix size {}'.format(slm.matrix.shape))
            original = np.get_printoptions()
            np.set_printoptions(3, suppress=True)
            print(transf_matrix)
            print(slm.xopt)
            np.set_printoptions(**original)

            print(' Duration %0.3f sec. \n' % (time() - t,))

        return transf_streamlines, slr_bmd
Example #3
def bench_set_number_of_points():
    repeat = 5
    nb_streamlines = DATA['nb_streamlines']

    msg = "Timing set_number_of_points() with {0:,} streamlines."
    print(msg.format(nb_streamlines * repeat))
    cython_time = measure("set_number_of_points(streamlines, nb_points)",
                          repeat)
    print("Cython time: {0:.3f} sec".format(cython_time))

    python_time = measure("[set_number_of_points_python(s, nb_points)"
                          " for s in streamlines]", repeat)
    print("Python time: {0:.2f} sec".format(python_time))
    print("Speed up of {0:.2f}x".format(python_time/cython_time))

    # Make sure it produces the same results.
    assert_array_almost_equal([set_number_of_points_python(s) for s in DATA["streamlines"]],
                              set_number_of_points(DATA["streamlines"]))

    cython_time_arrseq = measure("set_number_of_points(streamlines, nb_points)", repeat)
    print("Cython time (ArrSeq): {0:.3f} sec".format(cython_time_arrseq))
    print("Speed up of {0:.2f}x".format(python_time/cython_time_arrseq))

    # Make sure it produces the same results.
    assert_array_equal(set_number_of_points(DATA["streamlines"]),
                       set_number_of_points(DATA["streamlines_arrseq"]))
Example #4
def test_set_number_of_points_memory_leaks():
    # Test some dtypes
    dtypes = [np.float32, np.float64, np.int32, np.int64]
    for dtype in dtypes:
        rng = np.random.RandomState(1234)
        NB_STREAMLINES = 10000
        streamlines = [rng.randn(rng.randint(10, 100), 3).astype(dtype) for _ in range(NB_STREAMLINES)]

        list_refcount_before = get_type_refcount()["list"]

        rstreamlines = set_number_of_points(streamlines, nb_points=2)
        list_refcount_after = get_type_refcount()["list"]
        del rstreamlines  # Delete `rstreamlines` because it holds a reference to `list`.

        # Calling `set_number_of_points` should increase the refcount of `list` by one
        # since we kept the returned value.
        assert_equal(list_refcount_after, list_refcount_before+1)

    # Test mixed dtypes
    rng = np.random.RandomState(1234)
    NB_STREAMLINES = 10000
    streamlines = []
    for i in range(NB_STREAMLINES):
        dtype = dtypes[i % len(dtypes)]
        streamlines.append(rng.randn(rng.randint(10, 100), 3).astype(dtype))

    list_refcount_before = get_type_refcount()["list"]

    rstreamlines = set_number_of_points(streamlines, nb_points=2)
    list_refcount_after = get_type_refcount()["list"]

    # Calling `set_number_of_points` should increase the refcount of `list` by one
    # since we kept the returned value.
    assert_equal(list_refcount_after, list_refcount_before+1)
Example #5
def ba_analysis(recognized_bundle, expert_bundle, threshold=2.):

    recognized_bundle = set_number_of_points(recognized_bundle, 20)

    expert_bundle = set_number_of_points(expert_bundle, 20)

    return bundle_adjacency(recognized_bundle, expert_bundle, threshold)
Example #6
def main():
    parser = buildArgsParser()
    args = parser.parse_args()

    full_tfile = nib.streamlines.load(args.full_tfile)
    model_tfile = nib.streamlines.load(args.model_tfile)
    model_mask = nib.load(args.model_mask)

    # Bring streamlines to voxel space, where coordinate (0,0,0) represents the corner of a voxel.
    model_tfile.tractogram.apply_affine(np.linalg.inv(model_mask.affine))
    model_tfile.streamlines._data += 0.5  # Shift of half a voxel
    full_tfile.tractogram.apply_affine(np.linalg.inv(model_mask.affine))
    full_tfile.streamlines._data += 0.5  # Shift of half a voxel

    assert(model_mask.get_data().sum() == create_binary_map(model_tfile.streamlines, model_mask).sum())

    # Resample streamlines
    full_streamlines = set_number_of_points(full_tfile.streamlines, args.nb_points_resampling)
    model_streamlines = set_number_of_points(model_tfile.streamlines, args.nb_points_resampling)

    # Segment model
    rng = np.random.RandomState(42)
    indices = np.arange(len(model_streamlines))
    rng.shuffle(indices)
    qb = QuickBundles(args.qb_threshold)
    clusters = qb.cluster(model_streamlines, ordering=indices)

    # Try to find optimal assignment threshold
    best_threshold = None
    best_f1_score = -np.inf
    thresholds = np.arange(-2, 10, 0.2) + args.qb_threshold
    for threshold in thresholds:
        indices = qb.find_closest(clusters, full_streamlines, threshold=threshold)
        nb_assignments = np.sum(indices != -1)

        mask = create_binary_map(full_tfile.streamlines[indices != -1], model_mask)

        overlap_per_bundle = _compute_overlap(model_mask.get_data(), mask)
        overreach_per_bundle = _compute_overreach(model_mask.get_data(), mask)
        # overreach_norm_gt_per_bundle = _compute_overreach_normalize_gt(model_mask.get_data(), mask)
        f1_score = _compute_f1_score(overlap_per_bundle, overreach_per_bundle)
        if best_f1_score < f1_score:
            best_threshold = threshold
            best_f1_score = f1_score

        print("{}:\t {}/{} ({:.1%}) {:.1%}/{:.1%} ({:.1%}) {}/{}".format(
            threshold,
            nb_assignments, len(model_streamlines), nb_assignments/len(model_streamlines),
            overlap_per_bundle, overreach_per_bundle, f1_score,
            mask.sum(), model_mask.get_data().sum()))

        if overlap_per_bundle >= 1:
            break


    print("Best threshold: {} with F1-Score of {}".format(best_threshold, best_f1_score))
Example #7
    def test_set_number_of_points(self):
        # Test resampling of only one streamline
        nb_points = 12
        modified_streamline_cython = dipystreamline.set_number_of_points(self.streamline, nb_points)
        modified_streamline_python = set_number_of_points_python(self.streamline, nb_points)
        assert_equal(len(modified_streamline_cython), nb_points)
        # Using 5-digit precision because the streamline is in float32.
        assert_array_almost_equal(modified_streamline_cython, modified_streamline_python, 5)

        modified_streamline_cython = dipystreamline.set_number_of_points(self.streamline_64bit, nb_points)
        modified_streamline_python = set_number_of_points_python(self.streamline_64bit, nb_points)
        assert_equal(len(modified_streamline_cython), nb_points)
        assert_array_almost_equal(modified_streamline_cython, modified_streamline_python)

        res = []
        simple_streamline = np.array([[0, 0, 0], [1, 1, 1], [2, 2, 2]], 'f4')
        for nb_points in range(2, 200):
            modified_streamline_cython = dipystreamline.set_number_of_points(simple_streamline, nb_points)
            res.append(nb_points - len(modified_streamline_cython))

        assert_equal(np.sum(res), 0)

        # Test resampling of multiple streamlines of different nb_points
        nb_points = 12
        modified_streamlines_cython = dipystreamline.set_number_of_points(self.streamlines, nb_points)

        for i, s in enumerate(self.streamlines):
            modified_streamline_python = set_number_of_points_python(s, nb_points)
            # Using 5-digit precision because the streamline is in float32.
            assert_array_almost_equal(modified_streamlines_cython[i], modified_streamline_python, 5)

        modified_streamlines_cython = dipystreamline.set_number_of_points(self.streamlines_64bit, nb_points)

        for i, s in enumerate(self.streamlines_64bit):
            modified_streamline_python = set_number_of_points_python(s, nb_points)
            assert_array_almost_equal(modified_streamlines_cython[i], modified_streamline_python)

        # Test streamlines with mixed dtype
        streamlines = [self.streamline, self.streamline.astype(np.float64)]
        assert_raises(ValueError, dipystreamline.set_number_of_points, streamlines, nb_points)

        # Test streamline with shape not Nx3
        assert_raises(ValueError, dipystreamline.set_number_of_points, self.streamline.T, nb_points)

        # Test streamline with integer dtype
        modified_streamline = dipystreamline.set_number_of_points(self.streamline.astype(np.int32))
        assert_true(modified_streamline.dtype == np.float32)
        modified_streamline = dipystreamline.set_number_of_points(self.streamline.astype(np.int64))
        assert_true(modified_streamline.dtype == np.float64)

        # Test empty list
        assert_equal(dipystreamline.set_number_of_points([]), [])

        # Test streamline having only one point
        assert_raises(ValueError, dipystreamline.set_number_of_points, np.array([[1, 2, 3]]))

        # We do not support list of lists, it should be numpy ndarray.
        streamline = [[1, 2, 3], [4, 5, 5], [2, 1, 3], [4, 2, 1]]
        assert_raises(AttributeError, dipystreamline.set_number_of_points, streamline)
Example #8
    def _prepare_batch(self, indices):
        orig_streamlines = self.dataset.streamlines[indices].copy()
        streamlines = self._add_noise_to_streamlines(orig_streamlines)

        # streamline_length = np.max(streamlines._lengths)  # Sequences are resampled so that they have the same length.
        streamline_length = np.min(streamlines._lengths)  # Sequences are resampled so that they have the same length.
        streamlines._lengths = streamlines._lengths.astype("int64")
        streamlines = set_number_of_points(streamlines, nb_points=streamline_length)
        inputs = streamlines._data  # Streamlines coordinates
        targets = streamlines._data[1:] - streamlines._data[:-1]  # Unnormalized directions

        batch_size = len(streamlines)
        if self.use_augment_by_flipping:
            batch_size *= 2

        if self.include_last_point:  # only for the input
            raise NotImplementedError()

        else:
            batch_inputs = np.zeros((batch_size, streamline_length - self.k, inputs.shape[1]), dtype=floatX)
            batch_targets = np.zeros((batch_size, streamline_length - self.k, self.k, self.target_size), dtype=floatX)

            for i, (offset, length) in enumerate(zip(streamlines._offsets, streamlines._lengths)):
                n = length - self.k
                batch_inputs[i, :n] = inputs[offset:offset + n]
                batch_targets[i, :n] = self._window_stack(targets[offset:offset + length - 1, None], self.k)

                if self.use_augment_by_flipping:
                    batch_inputs[i + len(streamlines), :n] = inputs[offset + self.k:offset + length][::-1]
                    batch_targets[i + len(streamlines), :n] = self._window_stack(-targets[offset:offset + length - 1, None][::-1], self.k)

        return batch_inputs, batch_targets
Example #9
def test_quickbundles_with_python_metric():

    class MDFpy(dipymetric.Metric):
        def are_compatible(self, shape1, shape2):
            return shape1 == shape2

        def dist(self, features1, features2):
            dist = np.sqrt(np.sum((features1 - features2)**2, axis=1))
            dist = np.sum(dist / len(features1))
            return dist

    rdata = streamline_utils.set_number_of_points(data, 10)
    qb = QuickBundles(threshold=2 * threshold, metric=MDFpy())

    clusters = qb.cluster(rdata)

    # By default `refdata` refers to data being clustered.
    assert_equal(clusters.refdata, rdata)
    # Set `refdata` to return indices instead of actual data points.
    clusters.refdata = None
    assert_array_equal(list(itertools.chain(*clusters)),
                       list(itertools.chain(*clusters_truth)))

    # Cluster read-only data
    for datum in rdata:
        datum.setflags(write=False)

    # Cluster data with different dtype (should be converted into float32)
    for datatype in [np.float64, np.int32, np.int64]:
        newdata = [datum.astype(datatype) for datum in rdata]
        clusters = qb.cluster(newdata)
        assert_equal(clusters.centroids[0].dtype, np.float32)
Example #10
    def evaluate_results(self, model_bundle, pruned_streamlines, slr_select):
        """ Comapare the similiarity between two given bundles, model bundle,
        and extracted bundle.

        Parameters
        ----------
        model_bundle : Streamlines
        pruned_streamlines : Streamlines
        slr_select : tuple
            Number of streamlines to select from the model bundle and from
            the neighborhood of the model when performing the local SLR.

        Returns
        -------
        ba_value : float
            bundle adjacency value between model bundle and pruned bundle
        bmd_value : float
            bundle minimum distance value between model bundle and
            pruned bundle
        """

        spruned_streamlines = Streamlines(pruned_streamlines)
        recog_centroids = self._cluster_model_bundle(
            spruned_streamlines,
            model_clust_thr=1.25)
        mod_centroids = self._cluster_model_bundle(
            model_bundle,
            model_clust_thr=1.25)
        recog_centroids = Streamlines(recog_centroids)
        model_centroids = Streamlines(mod_centroids)
        ba_value = ba_analysis(recog_centroids, model_centroids, threshold=10)

        BMD = BundleMinDistanceMetric()
        static = select_random_set_of_streamlines(model_bundle,
                                                  slr_select[0])
        moving = select_random_set_of_streamlines(pruned_streamlines,
                                                  slr_select[1])
        nb_pts = 20
        static = set_number_of_points(static, nb_pts)
        moving = set_number_of_points(moving, nb_pts)

        BMD.setup(static, moving)
        x0 = np.array([0, 0, 0, 0, 0, 0, 1., 1., 1, 0, 0, 0])  # affine
        bmd_value = BMD.distance(x0.tolist())

        return ba_value, bmd_value
Example #11
def test_recobundles_flow():
    with TemporaryDirectory() as out_dir:
        data_path = get_fnames('fornix')
        streams, hdr = nib.trackvis.read(data_path)
        fornix = [s[0] for s in streams]

        f = Streamlines(fornix)
        f1 = f.copy()

        f2 = f1[:15].copy()
        f2._data += np.array([40, 0, 0])

        f.extend(f2)

        f2_path = pjoin(out_dir, "f2.trk")
        save_trk(f2_path, f2, affine=np.eye(4))

        f1_path = pjoin(out_dir, "f1.trk")
        save_trk(f1_path, f, affine=np.eye(4))

        rb_flow = RecoBundlesFlow(force=True)
        rb_flow.run(f1_path, f2_path, greater_than=0, clust_thr=10,
                    model_clust_thr=5., reduction_thr=10, out_dir=out_dir)

        labels = rb_flow.last_generated_outputs['out_recognized_labels']
        recog_trk = rb_flow.last_generated_outputs['out_recognized_transf']

        rec_bundle, _ = load_trk(recog_trk)
        npt.assert_equal(len(rec_bundle) == len(f2), True)

        label_flow = LabelsBundlesFlow(force=True)
        label_flow.run(f1_path, labels)

        recog_bundle = label_flow.last_generated_outputs['out_bundle']
        rec_bundle_org, _ = load_trk(recog_bundle)

        BMD = BundleMinDistanceMetric()
        nb_pts = 20
        static = set_number_of_points(f2, nb_pts)
        moving = set_number_of_points(rec_bundle_org, nb_pts)

        BMD.setup(static, moving)
        x0 = np.array([0, 0, 0, 0, 0, 0, 1., 1., 1, 0, 0, 0])  # affine
        bmd_value = BMD.distance(x0.tolist())

        npt.assert_equal(bmd_value < 1, True)
Example #12
def test_vectorize_streamlines():

    cingulum_bundles = two_cingulum_bundles()

    cb_subj1 = cingulum_bundles[0]
    cb_subj1 = set_number_of_points(cb_subj1, 10)
    cb_subj1_pts_no = np.array([s.shape[0] for s in cb_subj1])

    assert_equal(np.all(cb_subj1_pts_no == 10), True)
Example #13
    def _prepare_batch(self, indices):
        orig_streamlines, volume_ids = self.dataset[indices]
        streamlines = self._add_noise_to_streamlines(orig_streamlines.copy())

        streamlines._lengths = streamlines._lengths.astype("int64")
        if self.resample_streamlines:
            # streamline_length = np.max(streamlines._lengths)  # Sequences are resampled so that they have the same length.
            streamline_length = np.min(streamlines._lengths)  # Sequences are resampled so that they have the same length.
            streamlines = set_number_of_points(streamlines, nb_points=streamline_length)

        inputs = streamlines._data  # Streamlines coordinates
        targets = streamlines._data[1:] - streamlines._data[:-1]  # Unnormalized directions
        if self.normalize_target:
            targets = targets / np.sqrt(np.sum(targets ** 2, axis=1, keepdims=True))  # Normalized directions

        actual_batch_size = sum(map(lambda x: len(x)-1, streamlines))
        if self.use_augment_by_flipping:
            half_batch_size = actual_batch_size
            actual_batch_size *= 2

        inputs_shape = inputs.shape[1]
        if self.feed_previous_direction:
            inputs_shape *= 2

        batch_inputs = np.zeros((actual_batch_size, inputs_shape), dtype=floatX)
        batch_targets = np.zeros((actual_batch_size, 3), dtype=floatX)
        batch_array_index = 0

        for i, (offset, length, volume_id) in enumerate(zip(streamlines._offsets, streamlines._lengths, volume_ids)):

            start = batch_array_index
            end = batch_array_index + (length - 1)
            batch_array_index += length - 1

            batch_inputs[start:end, :3] = inputs[offset:offset + length - 1]  # [0, 1, 2, 3, 4] => [0, 1, 2, 3]
            batch_targets[start:end] = targets[offset:offset + length - 1]  # [1-0, 2-1, 3-2, 4-3] => [1-0, 2-1, 3-2, 4-3]

            if self.feed_previous_direction:
                batch_inputs[start, 3:] = np.zeros((1, 3))
                batch_inputs[start + 1:end, 3:] = batch_targets[start:end - 1]

            if self.use_augment_by_flipping:
                flipped_start = start + half_batch_size
                flipped_end = end + half_batch_size
                batch_inputs[flipped_start:flipped_end] = inputs[offset + 1:offset + length][::-1]  # [0, 1, 2, 3, 4] => [4, 3, 2, 1]
                batch_targets[flipped_start:flipped_end] = -targets[offset:offset + length - 1][::-1]  # [1-0, 2-1, 3-2, 4-3] => [4-3, 3-2, 2-1, 1-0]

                if self.feed_previous_direction:
                    batch_inputs[flipped_start, 3:] = np.zeros((1, 3))
                    batch_inputs[flipped_start + 1:flipped_end, 3:] = batch_targets[flipped_start:flipped_end - 1]

        batch_volume_ids = np.repeat(volume_ids, list(map(lambda x: len(x)-1, streamlines)))
        if self.use_augment_by_flipping:
            batch_volume_ids = np.tile(batch_volume_ids, [2])
        batch_inputs = np.concatenate([batch_inputs, batch_volume_ids[:, None]], axis=1)  # Streamlines coords + dwi ID

        return batch_inputs, batch_targets
Example #14
def test_rigid_partial_real_bundles():

    static = fornix_streamlines()[:20]
    moving = fornix_streamlines()[20:40]
    static_center, shift = center_streamlines(static)
    moving_center, shift2 = center_streamlines(moving)

    print(shift2)
    mat = compose_matrix(translate=np.array([0, 0, 0.]),
                         angles=np.deg2rad([40, 0, 0.]))
    moved = transform_streamlines(moving_center, mat)

    srr = StreamlineLinearRegistration()

    srm = srr.optimize(static_center, moved)
    print(srm.fopt)
    print(srm.iterations)
    print(srm.funcs)

    moving_back = srm.transform(moved)
    print(srm.matrix)

    static_center = set_number_of_points(static_center, 100)
    moving_center = set_number_of_points(moving_back, 100)

    vol = np.zeros((100, 100, 100))
    spts = np.concatenate(static_center, axis=0)
    spts = np.round(spts).astype(int) + np.array([50, 50, 50])

    mpts = np.concatenate(moving_center, axis=0)
    mpts = np.round(mpts).astype(int) + np.array([50, 50, 50])

    for index in spts:
        i, j, k = index
        vol[i, j, k] = 1

    vol2 = np.zeros((100, 100, 100))
    for index in mpts:
        i, j, k = index
        vol2[i, j, k] = 1

    overlap = np.sum(np.logical_and(vol, vol2)) / float(np.sum(vol2))

    assert_equal(overlap * 100 > 40, True)
Example #15
def test_feature_resample():
    from dipy.tracking.streamline import set_number_of_points

    # Test subclassing Feature
    class ResampleFeature(dipymetric.Feature):
        def __init__(self, nb_points):
            super(ResampleFeature, self).__init__(is_order_invariant=False)
            self.nb_points = nb_points
            if nb_points <= 0:
                msg = ("ResampleFeature: `nb_points` must be strictly"
                       " positive: {0}").format(nb_points)
                raise ValueError(msg)

        def infer_shape(self, streamline):
            return (self.nb_points, streamline.shape[1])

        def extract(self, streamline):
            return set_number_of_points(streamline, self.nb_points)

    assert_raises(ValueError, dipymetric.ResampleFeature, nb_points=0)
    assert_raises(ValueError, ResampleFeature, nb_points=0)

    max_points = max(map(len, [s1, s2, s3, s4]))
    for nb_points in [2, 5, 2*max_points]:
        for feature in [dipymetric.ResampleFeature(nb_points),
                        ResampleFeature(nb_points)]:
            for s in [s1, s2, s3, s4]:
                # Test method infer_shape
                assert_equal(feature.infer_shape(s), (nb_points, s.shape[1]))

                # Test method extract
                features = feature.extract(s)
                assert_equal(features.shape, (nb_points, s.shape[1]))
                assert_array_almost_equal(features,
                                          set_number_of_points(s, nb_points))

            # This feature type is not order invariant
            assert_false(feature.is_order_invariant)
            for s in [s1, s2, s3, s4]:
                features = feature.extract(s)
                features_flip = feature.extract(s[::-1])
                assert_array_equal(features_flip,
                                   set_number_of_points(s[::-1], nb_points))
                assert_true(np.any(np.not_equal(features, features_flip)))
Example #16
def streamline_registration(moving, static, n_points=100,
                            native_resampled=False):
    """
    Register two collections of streamlines ('bundles') to each other

    Parameters
    ----------
    moving, static : lists of 3 by n, or str
        The two bundles to be registered. Given either as lists of arrays with
        3D coordinates, or strings containing full paths to these files.

    n_points : int, optional
        How many points to resample to. Default: 100.

    native_resampled : bool, optional
        Whether to return the moving bundle in the original space, but
        resampled in the static space to n_points.

    Returns
    -------
    aligned : list
        Streamlines from the moving group, moved to be closely matched to
        the static group.

    matrix : array (4, 4)
        The affine transformation that takes us from 'moving' to 'static'
    """
    # Load the streamlines, if you were given a file-name
    if isinstance(moving, str):
        moving = sut.read_trk(moving)
    if isinstance(static, str):
        static = sut.read_trk(static)

    srr = StreamlineLinearRegistration()
    srm = srr.optimize(static=set_number_of_points(static, n_points),
                       moving=set_number_of_points(moving, n_points))

    aligned = srm.transform(moving)
    if native_resampled:
        aligned = set_number_of_points(aligned, n_points)
        aligned = move_streamlines(aligned, np.linalg.inv(srm.matrix))

    return aligned, srm.matrix
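
A hypothetical usage sketch for the helper above; the toy bundles below are illustrative assumptions, not part of the original code:

import numpy as np

# Two toy bundles given as lists of (N, 3) coordinate arrays.
moving_bundle = [np.random.rand(50, 3) * 10 for _ in range(10)]
static_bundle = [np.random.rand(60, 3) * 10 for _ in range(10)]

# Register 'moving' onto 'static'; the returned matrix is the (4, 4) affine.
aligned, affine_mat = streamline_registration(moving_bundle, static_bundle,
                                              n_points=100)
print(affine_mat.shape)  # (4, 4)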
Example #17
def bench_quickbundles():
    dtype = "float32"
    repeat = 10
    nb_points = 12

    streams, hdr = nib.trackvis.read(get_fnames('fornix'))
    fornix = [s[0].astype(dtype) for s in streams]
    fornix = streamline_utils.set_number_of_points(fornix, nb_points)

    # Create eight copies of the fornix to be clustered (one in each octant).
    streamlines = []
    streamlines += [s + np.array([100, 100, 100], dtype) for s in fornix]
    streamlines += [s + np.array([100, -100, 100], dtype) for s in fornix]
    streamlines += [s + np.array([100, 100, -100], dtype) for s in fornix]
    streamlines += [s + np.array([100, -100, -100], dtype) for s in fornix]
    streamlines += [s + np.array([-100, 100, 100], dtype) for s in fornix]
    streamlines += [s + np.array([-100, -100, 100], dtype) for s in fornix]
    streamlines += [s + np.array([-100, 100, -100], dtype) for s in fornix]
    streamlines += [s + np.array([-100, -100, -100], dtype) for s in fornix]

    # The expected number of clusters of the fornix using threshold=10 is 4.
    threshold = 10.
    expected_nb_clusters = 4 * 8

    print("Timing QuickBundles 1.0 vs. 2.0")

    qb = QB_Old(streamlines, threshold, pts=None)
    qb1_time = measure("QB_Old(streamlines, threshold, nb_points)", repeat)
    print("QuickBundles time: {0:.4}sec".format(qb1_time))
    assert_equal(qb.total_clusters, expected_nb_clusters)
    sizes1 = [qb.partitions()[i]['N'] for i in range(qb.total_clusters)]
    indices1 = [qb.partitions()[i]['indices']
                for i in range(qb.total_clusters)]

    qb2 = QB_New(threshold)
    qb2_time = measure("clusters = qb2.cluster(streamlines)", repeat)
    print("QuickBundles2 time: {0:.4}sec".format(qb2_time))
    print("Speed up of {0}x".format(qb1_time / qb2_time))
    clusters = qb2.cluster(streamlines)
    sizes2 = map(len, clusters)
    indices2 = map(lambda c: c.indices, clusters)
    assert_equal(len(clusters), expected_nb_clusters)
    assert_array_equal(list(sizes2), sizes1)
    assert_arrays_equal(indices2, indices1)

    qb = QB_New(threshold, metric=MDFpy())
    qb3_time = measure("clusters = qb.cluster(streamlines)", repeat)
    print("QuickBundles2_python time: {0:.4}sec".format(qb3_time))
    print("Speed up of {0}x".format(qb1_time / qb3_time))
    clusters = qb.cluster(streamlines)
    sizes3 = map(len, clusters)
    indices3 = map(lambda c: c.indices, clusters)
    assert_equal(len(clusters), expected_nb_clusters)
    assert_array_equal(list(sizes3), sizes1)
    assert_arrays_equal(indices3, indices1)
Example #18
def simulated_bundle(no_streamlines=10, waves=False, no_pts=12):
    t = np.linspace(-10, 10, 200)
    # parallel waves or parallel lines
    bundle = []
    for i in np.linspace(-5, 5, no_streamlines):
        if waves:
            pts = np.vstack((np.cos(t), t, i * np.ones(t.shape))).T
        else:
            pts = np.vstack((np.zeros(t.shape), t, i * np.ones(t.shape))).T
        pts = set_number_of_points(pts, no_pts)
        bundle.append(pts)

    return bundle
Example #19
    def _prepare_batch(self, indices):
        orig_streamlines, volume_ids = self.dataset[indices]
        streamlines = self._add_noise_to_streamlines(orig_streamlines.copy())

        streamlines._lengths = streamlines._lengths.astype("int64")
        if self.resample_streamlines:
            # streamline_length = np.max(streamlines._lengths)  # Sequences are resampled so that they have the same length.
            streamline_length = np.min(streamlines._lengths)  # Sequences are resampled so that they have the same length.
            streamlines = set_number_of_points(streamlines, nb_points=streamline_length)

        inputs = streamlines._data  # Streamlines coordinates
        targets = streamlines._data[1:] - streamlines._data[:-1]  # Unnormalized directions
        if self.normalize_target:
            targets = targets / np.sqrt(np.sum(targets**2, axis=1, keepdims=True))  # Normalized directions

        batch_size = len(streamlines)
        if self.use_augment_by_flipping:
            batch_size *= 2

        max_streamline_length = np.max(streamlines._lengths)  # Sequences are padded so that they have the same length.
        batch_masks = np.zeros((batch_size, max_streamline_length-1), dtype=floatX)
        batch_inputs = np.zeros((batch_size, max_streamline_length-1, inputs.shape[1]), dtype=floatX)
        batch_targets = np.zeros((batch_size, max_streamline_length-1, 3), dtype=floatX)
        # batch_volume_ids = np.zeros((batch_size, max_streamline_length-1, 1), dtype=floatX)

        for i, (offset, length, volume_id) in enumerate(zip(streamlines._offsets, streamlines._lengths, volume_ids)):
            batch_masks[i, :length-1] = 1
            batch_inputs[i, :length-1] = inputs[offset:offset+length-1]  # [0, 1, 2, 3, 4] => [0, 1, 2, 3]
            batch_targets[i, :length-1] = targets[offset:offset+length-1]  # [1-0, 2-1, 3-2, 4-3] => [1-0, 2-1, 3-2, 4-3]
            # batch_volume_ids[i, :length-1] = volume_id

            if self.use_augment_by_flipping:
                batch_masks[i+len(streamlines), :length-1] = 1
                batch_inputs[i+len(streamlines), :length-1] = inputs[offset+1:offset+length][::-1]  # [0, 1, 2, 3, 4] => [4, 3, 2, 1]
                batch_targets[i+len(streamlines), :length-1] = -targets[offset:offset+length-1][::-1]  # [1-0, 2-1, 3-2, 4-3] => [4-3, 3-2, 2-1, 1-0]
                # batch_volume_ids[i+len(streamlines), :length-1] = volume_id

        batch_volume_ids = np.tile(volume_ids[:, None, None], (1 + self.use_augment_by_flipping, max_streamline_length-1, 1))
        batch_inputs = np.concatenate([batch_inputs, batch_volume_ids], axis=2)  # Streamlines coords + dwi ID

        if self.feed_previous_direction:
            previous_directions = np.concatenate([np.zeros((batch_size, 1, 3), dtype=floatX), batch_targets[:, :-1]], axis=1)
            batch_inputs = np.concatenate([batch_inputs, previous_directions], axis=2)  # Streamlines coords + dwi ID + previous direction

        return batch_inputs, batch_targets, batch_masks
Example #20
def test_quickbundles_streamlines():
    rdata = streamline_utils.set_number_of_points(data, 10)
    qb = QuickBundles(threshold=2*threshold)

    clusters = qb.cluster(rdata)
    # By default `refdata` refers to data being clustered.
    assert_equal(clusters.refdata, rdata)
    # Set `refdata` to return indices instead of actual data points.
    clusters.refdata = None
    assert_array_equal(list(itertools.chain(*clusters)),
                       list(itertools.chain(*clusters_truth)))

    # Cluster read-only data
    for datum in rdata:
        datum.setflags(write=False)

    # Cluster data with different dtype (should be converted into float32)
    for datatype in [np.float64, np.int32, np.int64]:
        newdata = [datum.astype(datatype) for datum in rdata]
        clusters = qb.cluster(newdata)
        assert_equal(clusters.centroids[0].dtype, np.float32)
Example #21
def auto_extract(model_cluster_map, submission_cluster_map,
                 number_pts_per_str=NB_POINTS_RESAMPLE,
                 close_centroids_thr=20,
                 clean_thr=7.):

    model_centroids = model_cluster_map.centroids

    centroid_matrix = bundles_distances_mdf(model_centroids,
                                            submission_cluster_map.centroids)

    centroid_matrix[centroid_matrix > close_centroids_thr] = np.inf
    mins = np.min(centroid_matrix, axis=0)
    close_clusters = [submission_cluster_map[i]
                      for i in np.where(mins != np.inf)[0]]
    close_indices_inter = [submission_cluster_map[i].indices
                           for i in np.where(mins != np.inf)[0]]
    close_indices = list(chain.from_iterable(close_indices_inter))

    close_streamlines = list(chain(*close_clusters))
    closer_streamlines = close_streamlines

    rcloser_streamlines = set_number_of_points(closer_streamlines,
                                               number_pts_per_str)

    clean_matrix = bundles_distances_mdf(model_cluster_map.refdata,
                                         rcloser_streamlines)

    clean_matrix[clean_matrix > clean_thr] = np.inf

    mins = np.min(clean_matrix, axis=0)

    clean_indices = [i for i in np.where(mins != np.inf)[0]]

    # Clean indices refer to the streamlines in closer_streamlines,
    # which are the same as the close_streamlines. Each close_streamline
    # has a related element in close_indices, for which the value
    # is the index of the original streamline in the moved_streamlines.
    final_selected_indices = [close_indices[idx] for idx in clean_indices]

    return final_selected_indices
Example #22
def _prepare_gt_bundles_info(bundles_dir, bundles_masks_dir,
                             gt_bundles_attribs, ref_anat_fname):
    # Ref bundles will contain {'name': 'name_of_the_bundle',
    #                           'threshold': thres_value,
    #                           'streamlines': list_of_streamlines}

    dummy_attribs = {'orientation': 'LPS'}
    qb = QuickBundles(20, metric=AveragePointwiseEuclideanMetric())

    ref_bundles = []

    for bundle_idx, bundle_f in enumerate(sorted(os.listdir(bundles_dir))):
        bundle_name = os.path.splitext(os.path.basename(bundle_f))[0]

        bundle_attribs = gt_bundles_attribs.get(os.path.basename(bundle_f))
        if bundle_attribs is None:
            raise ValueError(
                "Missing basic bundle attribs for {0}".format(bundle_f))

        # Resample up front to avoid doing it for each iteration of chunking
        orig_strl = [s for s in get_tracts_voxel_space_for_dipy(
                        os.path.join(bundles_dir, bundle_f),
                        ref_anat_fname, dummy_attribs)]

        resamp_bundle = set_number_of_points(orig_strl, NB_POINTS_RESAMPLE)
        resamp_bundle = [s.astype('f4') for s in resamp_bundle]

        bundle_cluster_map = qb.cluster(resamp_bundle)
        bundle_cluster_map.refdata = resamp_bundle

        bundle_mask = nib.load(os.path.join(bundles_masks_dir,
                                            bundle_name + '.nii.gz'))

        ref_bundles.append({'name': bundle_name,
                            'threshold': bundle_attribs['cluster_threshold'],
                            'cluster_map': bundle_cluster_map,
                            'mask': bundle_mask})

    return ref_bundles
Example #23
cb_subj1, cb_subj2 = two_cingulum_bundles()

from dipy.align.streamlinear import StreamlineLinearRegistration
from dipy.tracking.streamline import set_number_of_points


"""
An important step before running the registration is to resample the
streamlines so that both bundles have the same number of points per
streamline. Here we will use 20 points. This step is not optional: inputting
streamlines with different numbers of points will break the theoretical
advantages of the SLR, as explained in [Garyfallidis15]_.
"""

cb_subj1 = set_number_of_points(cb_subj1, 20)
cb_subj2 = set_number_of_points(cb_subj2, 20)

"""
Let's say now that we want to move ``cb_subj2`` (moving) so that it is
aligned with ``cb_subj1`` (static). Here is how this is done.
"""

srr = StreamlineLinearRegistration()

srm = srr.optimize(static=cb_subj1, moving=cb_subj2)

"""
After the optimization is finished we can apply the transformation to
``cb_subj2``.
"""
Example #24
    def _register_neighb_to_model(self,
                                  model_bundle,
                                  neighb_streamlines,
                                  metric=None,
                                  x0=None,
                                  bounds=None,
                                  select_model=400,
                                  select_target=600,
                                  method='L-BFGS-B',
                                  nb_pts=20):

        if self.verbose:
            print('# Local SLR of neighb_streamlines to model')
            t = time()

        if metric is None or metric == 'symmetric':
            metric = BundleMinDistanceMetric()
        if metric == 'asymmetric':
            metric = BundleMinDistanceAsymmetricMetric()
        if metric == 'diagonal':
            metric = BundleSumDistanceMatrixMetric()

        if x0 is None:
            x0 = 'similarity'

        if bounds is None:
            bounds = [(-30, 30), (-30, 30), (-30, 30), (-45, 45), (-45, 45),
                      (-45, 45), (0.8, 1.2)]

        # TODO: this can be sped up by using the centroids directly
        static = select_random_set_of_streamlines(model_bundle,
                                                  select_model,
                                                  rng=self.rng)
        moving = select_random_set_of_streamlines(neighb_streamlines,
                                                  select_target,
                                                  rng=self.rng)

        static = set_number_of_points(static, nb_pts)
        moving = set_number_of_points(moving, nb_pts)

        slr = StreamlineLinearRegistration(metric=metric,
                                           x0=x0,
                                           bounds=bounds,
                                           method=method)
        slm = slr.optimize(static, moving)

        transf_streamlines = neighb_streamlines.copy()
        transf_streamlines._data = apply_affine(slm.matrix,
                                                transf_streamlines._data)

        transf_matrix = slm.matrix
        slr_bmd = slm.fopt
        slr_iterations = slm.iterations

        if self.verbose:
            print(' Square-root of BMD is %.3f' % (np.sqrt(slr_bmd), ))
            if slr_iterations is not None:
                print(' Number of iterations %d' % (slr_iterations, ))
            print(' Matrix size {}'.format(slm.matrix.shape))
            original = np.get_printoptions()
            np.set_printoptions(3, suppress=True)
            print(transf_matrix)
            print(slm.xopt)
            np.set_printoptions(**original)

            print(' Duration %0.3f sec. \n' % (time() - t, ))

        return transf_streamlines, slr_bmd
Example #25
def main(args=None):

    # fname = get_fnames('fornix')
    #
    # fornix = load_tractogram(fname, 'same', bbox_valid_check=False)
    # streamlines = fornix.streamlines
    #
    # temp_data = streamlines.data[0]
    #
    # print(temp_data)
    #
    # print(len(streamlines.data))
    #
    # qb = QuickBundles(threshold=10.)
    # clusters = qb.cluster(streamlines)

    filepath = '/home/venkatesh/Desktop/Vehicle_counting_pipeline (extract.me)/Results/track_history/cam_7_dawn.csv'
    track_information = read_track_trajectories(filepath)

    plt.figure(figsize=(6, 6))

    obj_trajectory = []
    x_pos = []
    y_pos = []
    data = []
    for obj_id in track_information:
        cord_points = np.array(track_information[obj_id]['traj_point'])

        # data = []
        # x_pos = []
        # y_pos = []
        for index in range(len(cord_points)):
            x_pos.append(cord_points[index, 0])
            y_pos.append(cord_points[index, 1])
            data.append([cord_points[index, 0], cord_points[index, 1]])


        obj_trajectory.append(np.asarray(data, dtype = 'int32'))

    # print(obj_trajectory[0])

    # qb = QuickBundles(threshold=4.)
    streamlines = set_number_of_points(obj_trajectory, nb_points=50)

    # print(streamlines[0])
    # clusters = qb.cluster(streamlines)

    # Streamlines will be resampled to 24 points on the fly.
    # feature = ResampleFeature(nb_points=24)
    # metric = AveragePointwiseEuclideanMetric(feature=streamlines)  # a.k.a. MDF
    qb = QuickBundles(threshold=200.)
    clusters = qb.cluster(streamlines)

    print("Nb. clusters:", len(clusters))
    print("Cluster sizes:", list(map(len, clusters)))

    # Enables/disables interactive visualization
    interactive = False

    ren = window.Renderer()
    ren.SetBackground(1, 1, 1)
    ren.add(actor.streamtube(streamlines, window.colors.white))
    window.record(ren, out_path='fornix_initial.png', size=(600, 600))
    if interactive:
        window.show(ren)
    # wcss = []
    # for i in range(1, 11):
    #     kmeans = KMeans(n_clusters=i, init='k-means++', max_iter=300, n_init=10, random_state=0)
    #     kmeans.fit(streamlines)
    #     wcss.append(kmeans.inertia_)
    # plt.plot(range(1, 11), wcss)
    # plt.title('Elbow Method')
    # plt.xlabel('Number of clusters')
    # plt.ylabel('WCSS')
    # plt.show()
    #
    # kmeans = KMeans(n_clusters=4, init='k-means++', max_iter=300, n_init=10, random_state=0)
    # pred_y = kmeans.fit_predict(obj_trajectory)
    # plt.plot(cord_points[:, 0], cord_points[:, 1])
    # plt.plot(kmeans.cluster_centers_[:, 0], kmeans.cluster_centers_[:, 1])
    # # plt.plot(kmeans.cluster_centers_[:, 0], kmeans.cluster_centers_[:, 1], s=300, c='red')
    # plt.show()

    return 0
Example #26
    def _prepare_batch(self, indices):
        orig_streamlines, volume_ids = self.dataset[indices]
        streamlines = self._add_noise_to_streamlines(orig_streamlines.copy())

        streamlines._lengths = streamlines._lengths.astype("int64")
        if self.resample_streamlines:
            # streamline_length = np.max(streamlines._lengths)  # Sequences are resampled so that they have the same length.
            streamline_length = np.min(
                streamlines._lengths
            )  # Sequences are resampled so that they have the same length.
            streamlines = set_number_of_points(streamlines,
                                               nb_points=streamline_length)

        inputs = streamlines._data  # Streamlines coordinates
        targets = streamlines._data[
            1:] - streamlines._data[:-1]  # Unnormalized directions
        if self.normalize_target:
            targets = targets / np.sqrt(
                np.sum(targets**2, axis=1,
                       keepdims=True))  # Normalized directions

        actual_batch_size = sum(map(lambda x: len(x) - 1, streamlines))
        if self.use_augment_by_flipping:
            half_batch_size = actual_batch_size
            actual_batch_size *= 2

        inputs_shape = inputs.shape[1]
        if self.feed_previous_direction:
            inputs_shape *= 2

        batch_inputs = np.zeros((actual_batch_size, inputs_shape),
                                dtype=floatX)
        batch_targets = np.zeros((actual_batch_size, 3), dtype=floatX)
        batch_array_index = 0

        for i, (offset, length, volume_id) in enumerate(
                zip(streamlines._offsets, streamlines._lengths, volume_ids)):

            start = batch_array_index
            end = batch_array_index + (length - 1)
            batch_array_index += length - 1

            batch_inputs[start:end, :3] = inputs[
                offset:offset + length - 1]  # [0, 1, 2, 3, 4] => [0, 1, 2, 3]
            batch_targets[start:end] = targets[
                offset:offset + length -
                1]  # [1-0, 2-1, 3-2, 4-3] => [1-0, 2-1, 3-2, 4-3]

            if self.feed_previous_direction:
                batch_inputs[start, 3:] = np.zeros((1, 3))
                previous_directions = batch_targets[start:end - 1]
                batch_inputs[
                    start + 1:end, 3:] = previous_directions / np.sqrt(
                        np.sum(previous_directions**2, axis=1, keepdims=True) +
                        1e-6)  # Normalized directions

            if self.use_augment_by_flipping:
                flipped_start = start + half_batch_size
                flipped_end = end + half_batch_size
                batch_inputs[flipped_start:flipped_end, :3] = inputs[
                    offset + 1:offset +
                    length][::-1]  # [0, 1, 2, 3, 4] => [4, 3, 2, 1]
                batch_targets[flipped_start:flipped_end] = -targets[
                    offset:offset + length -
                    1][::-1]  # [1-0, 2-1, 3-2, 4-3] => [4-3, 3-2, 2-1, 1-0]

                if self.feed_previous_direction:
                    batch_inputs[flipped_start, 3:] = np.zeros((1, 3))
                    previous_directions = batch_targets[
                        flipped_start:flipped_end - 1]
                    batch_inputs[flipped_start + 1:flipped_end,
                                 3:] = previous_directions / np.sqrt(
                                     np.sum(previous_directions**2,
                                            axis=1,
                                            keepdims=True) +
                                     1e-6)  # Normalized directions

        batch_volume_ids = np.repeat(
            volume_ids, list(map(lambda x: len(x) - 1, streamlines)))
        if self.use_augment_by_flipping:
            batch_volume_ids = np.tile(batch_volume_ids, [2])

        # Add dwi ID.
        if self.feed_previous_direction:
            batch_inputs = np.concatenate(
                [
                    batch_inputs[:, :3], batch_volume_ids[:, None],
                    batch_inputs[:, 3:]
                ],
                axis=1)  # Streamlines coords + dwi ID + previous direction
        else:
            batch_inputs = np.concatenate(
                [batch_inputs, batch_volume_ids[:, None]],
                axis=1)  # Streamlines coords + dwi ID

        return batch_inputs, batch_targets
Example #27
from dipy.tracking.streamline import set_number_of_points

subj = all_subj_folders
names = all_subj_names
s = subj[10]
n = names[10]
folder_name = subj_folder + s
nii_file = rf'{subj_folder}{s}\diff_corrected_b2000_masked_T2.nii'
hardi_img = nib.load(nii_file)
affine = hardi_img.affine
tract_file = load_trk(rf'{subj_folder}{s}\streamlines\Tracts_CC_ARC.trk',
                      "same")
affine1, dimensions1, voxel_sizes1, voxel_order1 = tract_file.space_attributes

str1 = tract_file.streamlines
str1 = set_number_of_points(str1, 30)

weight_by = rf'rr{n[1:]}_ADD_along_streamlines'

weight_by_data, affine2 = load_weight_by_img(folder_name, weight_by)
streamlines = transform_streamlines(str1, np.linalg.inv(affine1))

hue = [0.25, -0.05]
saturation = [0, 1]
scale = [3, 12]

mean_pasi_weighted_img = f'{folder_name}{os.sep}streamlines{os.sep}CC_3-12_Exp_DTI_PreReg_along_1.png'

lut_cmap = actor.colormap_lookup_table(hue_range=hue,
                                       saturation_range=saturation,
                                       scale_range=scale)
Example #28
def gaussian_weights(bundle, n_points=100, return_mahalnobis=False,
                     stat=np.mean):
    """
    Calculate weights for each streamline/node in a bundle, based on the
    Mahalanobis distance from the core of the bundle at that node (the mean,
    by default).

    Parameters
    ----------
    bundle : Streamlines
        The streamlines to weight.
    n_points : int, optional
        The number of points to resample to. *If the `bundle` is an array, this
        input is ignored*. Default: 100.

    Returns
    -------
    w : array of shape (n_streamlines, n_points)
        Weights for each node in each streamline, calculated as the inverse
        of its Mahalanobis distance relative to the distribution of
        coordinates at that node position across streamlines.
    """
    # Resample to same length for each streamline:
    bundle = set_number_of_points(bundle, n_points)

    # This is the output
    w = np.zeros((len(bundle), n_points))

    # If there's only one fiber here, it gets the entire weighting:
    if len(bundle) == 1:
        if return_mahalnobis:
            return np.array([np.nan])
        else:
            return np.array([1])

    for node in range(n_points):
        # This should come back as a 3D covariance matrix with the spatial
        # variance covariance of this node across the different streamlines
        # This is a 3-by-3 array:
        node_coords = bundle.data[node::n_points]
        c = np.cov(node_coords.T, ddof=0)
        # Reorganize as an upper triangular matrix for expected Mahalanobis
        # input:
        c = np.array([[c[0, 0], c[0, 1], c[0, 2]],
                      [0, c[1, 1], c[1, 2]],
                      [0, 0, c[2, 2]]])
        # Calculate the mean or median of this node as well
        # delta = node_coords - np.mean(node_coords, 0)
        m = stat(node_coords, 0)
        # Weights are the inverse of the Mahalanobis distance
        for fn in range(len(bundle)):
            # In the special case where all the streamlines have the exact same
            # coordinate in this node, the covariance matrix is all zeros, so
            # we can't calculate the Mahalanobis distance, we will instead give
            # each streamline an identical weight, equal to the number of
            # streamlines:
            if np.allclose(c, 0):
                w[:, node] = len(bundle)
                break
            # Otherwise, go ahead and calculate Mahalanobis for node on
            # fiber[fn]:
            w[fn, node] = mahalanobis(node_coords[fn], m, np.linalg.inv(c))
    if return_mahalnobis:
        return w
    # weighting is inverse to the distance (the further you are, the less you
    # should be weighted)
    w = 1 / w
    # Normalize before returning, so that the weights in each node sum to 1:
    return w / np.sum(w, 0)
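
A simplified, standalone sketch of the per-node weighting step in the loop above, assuming SciPy's ``mahalanobis`` (the function called on each fiber); for clarity it uses the full covariance matrix instead of the upper triangular form used in the code:

import numpy as np
from scipy.spatial.distance import mahalanobis

# Toy data: coordinates of a single node across five streamlines (5 x 3).
node_coords = np.array([[0.0, 0.0, 0.0],
                        [0.1, 0.0, 0.1],
                        [0.2, 0.1, 0.0],
                        [0.0, 0.2, 0.1],
                        [0.1, 0.1, 0.2]])
m = node_coords.mean(axis=0)       # core of the bundle at this node
c = np.cov(node_coords.T, ddof=0)  # 3 x 3 spatial covariance

# Mahalanobis distance of each streamline's point from the core, then
# inverse-distance weights normalized so the node's weights sum to 1.
d = np.array([mahalanobis(p, m, np.linalg.inv(c)) for p in node_coords])
w = (1.0 / d) / np.sum(1.0 / d)
print(w)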
Example #29
def horizon_flow(input_files, noisy_streamlines_sigma=0., verbose=True):
    """ Horizon

    Parameters
    ----------
    input_files : variable string
    cluster : bool, optional
    cluster_thr : float, optional
    random_colors : bool, optional
    verbose : bool, optional
    length_lt : float, optional
    length_gt : float, optional
    clusters_lt : int, optional
    clusters_gt : int, optional
    noisy_streamlines_sigma : float, optional
    """

    filenames = input_files
    # glob(input_files)
    tractograms = []

    data = None
    affine = None
    for i, f in enumerate(filenames):
        if verbose:
            print('Loading file ...')
            print(f)
            print('\n')

        if f.endswith('.trk') or f.endswith('.tck'):
            streamlines = nib.streamlines.load(f).streamlines
            idx = np.arange(len(streamlines))
            rng = np.random.RandomState(42)
            rng.shuffle(idx)
            streamlines = streamlines[idx[:100]]

            if noisy_streamlines_sigma > 0. and i > 0:
                streamlines = add_noise_to_streamlines(
                    streamlines, noisy_streamlines_sigma)

            tractograms.append(streamlines)

        if f.endswith('.npz'):
            tractography_data = TractographyData.load(f)
            # idx = np.arange(len(tractography_data.streamlines))
            # rng = np.random.RandomState(42)
            # rng.shuffle(idx)
            # tractography_data.streamlines = tractography_data.streamlines[idx[:200]]
            # tractograms.append(tractography_data.streamlines)

            # Take M streamlines per bundle, but increase the value if there is only 1 bundle (i.e. whole brain)
            bundle_names = sorted(tractography_data.name2id.keys())
            M = 200 if len(bundle_names) > 1 else 10000
            for k in bundle_names:
                bundle_id = tractography_data.name2id[k]
                bundle_streamlines = tractography_data.streamlines[
                    tractography_data.bundle_ids == bundle_id]
                indices = np.random.choice(len(bundle_streamlines), M)
                streamlines = bundle_streamlines[indices].copy()
                streamlines._lengths = streamlines._lengths.astype("int64")
                streamlines = set_number_of_points(streamlines, nb_points=40)
                tractograms.append(streamlines)

            if hasattr(tractography_data, 'signal'):
                signal = tractography_data.signal.get_data()
                data = signal[:, :, :, 0]
                affine = np.eye(4)

        if f.endswith('.nii.gz') or f.endswith('.nii'):

            img = nib.load(f)
            data = img.get_data()
            affine = img.get_affine()
            if verbose:
                print(affine)

    # tmp save
    # tractogram = nib.streamlines.Tractogram(tractograms[0])
    # tractogram.apply_affine(img.affine)
    # nib.streamlines.save(tractogram, "tmp.tck")
    # exit()

    horizon(tractograms, data, affine)
Example #30
def gaussian_weights(bundle, n_points=100, return_mahalnobis=False,
                     stat=np.mean):
    """
    Calculate weights for each streamline/node in a bundle, based on the
    Mahalanobis distance from the core of the bundle at that node (the mean,
    by default).

    Parameters
    ----------
    bundle : Streamlines
        The streamlines to weight.
    n_points : int, optional
        The number of points to resample to. *If the `bundle` is an array, this
        input is ignored*. Default: 100.

    Returns
    -------
    w : array of shape (n_streamlines, n_points)
        Weights for each node in each streamline, calculated as the inverse
        of its Mahalanobis distance relative to the distribution of
        coordinates at that node position across streamlines.
    """
    # Resample to same length for each streamline:
    bundle = set_number_of_points(bundle, n_points)

    # This is the output
    w = np.zeros((len(bundle), n_points))

    # If there's only one fiber here, it gets the entire weighting:
    if len(bundle) == 1:
        if return_mahalnobis:
            return np.array([np.nan])
        else:
            return np.array([1])

    for node in range(n_points):
        # This should come back as a 3D covariance matrix with the spatial
        # variance covariance of this node across the different streamlines
        # This is a 3-by-3 array:
        node_coords = bundle.data[node::n_points]
        c = np.cov(node_coords.T, ddof=0)
        # Reorganize as an upper triangular matrix for expected Mahalanobis
        # input:
        c = np.array([[c[0, 0], c[0, 1], c[0, 2]],
                      [0, c[1, 1], c[1, 2]],
                      [0, 0, c[2, 2]]])
        # Calculate the mean or median of this node as well
        # delta = node_coords - np.mean(node_coords, 0)
        m = stat(node_coords, 0)
        # Weights are the inverse of the Mahalanobis distance
        for fn in range(len(bundle)):
            # In the special case where all the streamlines have the exact
            # same coordinate in this node, the covariance matrix is all
            # zeros, so we can't calculate the Mahalanobis distance; instead,
            # give each streamline an identical weight, equal to the number
            # of streamlines:
            if np.allclose(c, 0):
                w[:, node] = len(bundle)
                break
            # Otherwise, go ahead and calculate Mahalanobis for node on
            # fiber[fn]:
            w[fn, node] = mahalanobis(node_coords[fn], m, np.linalg.inv(c))
    if return_mahalnobis:
        return w
    # weighting is inverse to the distance (the further you are, the less you
    # should be weighted)
    w = 1 / w
    # Normalize before returning, so that the weights in each node sum to 1:
    return w / np.sum(w, 0)
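
# --- Usage sketch (added for illustration; not part of the original code) ---
# Assumes DIPY's two_cingulum_bundles() sample data and repeats, at module
# level, the imports gaussian_weights() relies on (numpy, scipy's mahalanobis
# and set_number_of_points) so the sketch can run on its own.
import numpy as np
from scipy.spatial.distance import mahalanobis  # used inside gaussian_weights
from dipy.data import two_cingulum_bundles
from dipy.tracking.streamline import Streamlines, set_number_of_points

cb_demo = Streamlines(two_cingulum_bundles()[0])
w_demo = gaussian_weights(cb_demo, n_points=100)   # shape (n_streamlines, 100)
print(w_demo.shape, np.allclose(np.sum(w_demo, axis=0), 1.0))  # sums to 1/node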
Esempio n. 31
0
def score_from_files(filename,
                     masks_dir,
                     bundles_dir,
                     tracts_attribs,
                     basic_bundles_attribs,
                     save_segmented=False,
                     save_IBs=False,
                     save_VBs=False,
                     save_VCWPs=False,
                     segmented_out_dir='',
                     segmented_base_name='',
                     verbose=False):
    """
    Computes all metrics in order to score a tractogram.

    Given a ``tck`` file of streamlines and a folder containing masks,
    compute the percent of: Valid Connections (VC), Invalid Connections (IC),
    Valid Connections but Wrong Path (VCWP), No Connections (NC),
    Average Bundle Coverage (ABC), Average ROIs Coverage (ARC),
    coverage per bundle and coverage per ROI. It also provides the number of
    Valid Bundles (VB), Invalid Bundles (IB) and streamlines per bundle.


    Parameters
    ------------
    filename : str
       name of a tracts file
    masks_dir : str
       name of the directory containing the masks
    save_segmented : bool
        if true, saves the segmented VC, IC, VCWP and NC

    Returns
    ---------
    scores : dict
        dictionary containing a score for each metric
    indices : dict
        dictionary containing the indices of streamlines composing VC, IC,
        VCWP and NC

    """
    if verbose:
        logging.basicConfig(level=logging.DEBUG)

    rois_dir = masks_dir + "rois/"
    bundles_masks_dir = masks_dir + "bundles/"
    wm_file = masks_dir + "wm.nii.gz"
    wm = nib.load(wm_file)

    streamlines = load_streamlines(filename, wm_file, tracts_attribs)

    ROIs = [nib.load(rois_dir + f) for f in sorted(os.listdir(rois_dir))]
    bundles_masks = [
        nib.load(bundles_masks_dir + f)
        for f in sorted(os.listdir(bundles_masks_dir))
    ]
    ref_bundles = []

    # Ref bundles will contain {'name': 'name_of_the_bundle', 'threshold': thres_value,
    #                           'streamlines': list_of_streamlines}
    dummy_attribs = {'orientation': 'LPS'}
    qb = QuickBundles(threshold=REF_BUNDLES_THRESHOLD,
                      metric=AveragePointwiseEuclideanMetric())

    out_centroids_dir = os.path.join(segmented_out_dir, os.path.pardir,
                                     "centroids")
    if not os.path.isdir(out_centroids_dir):
        os.mkdir(out_centroids_dir)

    rng = np.random.RandomState(42)

    for bundle_idx, bundle_f in enumerate(sorted(os.listdir(bundles_dir))):
        bundle_attribs = basic_bundles_attribs.get(os.path.basename(bundle_f))
        if bundle_attribs is None:
            raise ValueError(
                "Missing basic bundle attribs for {0}".format(bundle_f))

        # # Already resample to avoid doing it for each iteration of chunking
        # orig_strl = [s for s in get_tracts_voxel_space_for_dipy(
        #                         os.path.join(bundles_dir, bundle_f),
        #                         wm_file, dummy_attribs)]
        orig_strl = load_streamlines(os.path.join(bundles_dir, bundle_f),
                                     wm_file, dummy_attribs)
        resamp_bundle = set_number_of_points(orig_strl, NB_POINTS_RESAMPLE)
        # resamp_bundle = [s.astype('f4') for s in resamp_bundle]

        indices = np.arange(len(resamp_bundle))
        rng.shuffle(indices)
        bundle_cluster_map = qb.cluster(resamp_bundle, ordering=indices)

        # bundle_cluster_map.refdata = resamp_bundle

        bundle_mask_inv = nib.Nifti1Image(
            (1 - bundles_masks[bundle_idx].get_data()) * wm.get_data(),
            bundles_masks[bundle_idx].get_affine())

        ref_bundles.append({
            'name':
            os.path.basename(bundle_f).replace('.fib', '').replace('.tck', ''),
            'threshold':
            bundle_attribs['cluster_threshold'],
            'cluster_map':
            bundle_cluster_map,
            'mask':
            bundles_masks[bundle_idx],
            'mask_inv':
            bundle_mask_inv
        })

        logging.debug("{}: {} centroids".format(ref_bundles[-1]['name'],
                                                len(bundle_cluster_map)))
        nib.streamlines.save(
            nib.streamlines.Tractogram(bundle_cluster_map.centroids,
                                       affine_to_rasmm=np.eye(4)),
            os.path.join(out_centroids_dir, ref_bundles[-1]['name'] + ".tck"))

    score_func = score_auto_extract_auto_IBs

    return score_func(streamlines,
                      bundles_masks,
                      ref_bundles,
                      ROIs,
                      wm,
                      save_segmented=save_segmented,
                      save_IBs=save_IBs,
                      save_VBs=save_VBs,
                      save_VCWPs=save_VCWPs,
                      out_segmented_strl_dir=segmented_out_dir,
                      base_out_segmented_strl=segmented_base_name,
                      ref_anat_fname=wm_file)
Esempio n. 32
0
def main():
    parser = _build_arg_parser()
    args = parser.parse_args()

    # Make sure the colors are consistent between executions
    if args.random_coloring is not None:
        random.seed(int(args.random_coloring))

    # Handle bundle filenames. 3 cases are possible:
    # A list of files was passed as arguments
    # A directory was passed as argument
    # A single-file or path containing wildcard was specified
    bundle_filenames = args.in_bundles
    if len(args.in_bundles) == 1:
        # If only one file is specified, it may be a whole folder or
        # a single file, or a wildcard usage
        in_path = args.in_bundles[0]
        if os.path.isdir(in_path):
            # Load the folder
            bundle_filenames = [
                os.path.join(in_path, f) for f in os.listdir(in_path)
            ]
        else:
            # Load the file/wildcard
            bundle_filenames = glob.glob(in_path)

    assert_inputs_exist(parser, bundle_filenames, args.color_dict)

    scene = window.Scene()
    scene.background(tuple(map(int, args.background)))

    def subsample(list_obj):
        """ Lazily subsample a list
        """
        return list(itertools.islice(list_obj, 0, None, args.subsample))

    # Load each bundle, subsample and downsample it if needed
    for filename in bundle_filenames:
        try:
            # Lazy-load streamlines to minimize ram usage
            tractogram_gen = nib.streamlines.load(filename,
                                                  lazy_load=True).tractogram
            streamlines_gen = tractogram_gen.streamlines
        except ValueError:
            # Not a file loadable by nibabel's streamline API
            print('Skipping {}'.format(filename))
            continue

        # Actually load streamlines according to the subsample argument
        streamlines = subsample(streamlines_gen)

        if args.downsample:
            streamlines = set_number_of_points(streamlines, args.downsample)

        # Handle bundle colors. Either assign a random bright color to each
        # bundle, or load a color specific to each bundle, or let the bundles
        # be colored according to their local orientation
        if args.random_coloring:
            color = random_rgb()
        elif args.color_dict:
            with open(args.color_dict) as json_file:
                # Color dictionary
                color_dict = json.load(json_file)

                # Extract filenames to compare against the color dictionary
                basename = os.path.splitext(os.path.basename(filename))[0]

                # Load colors
                color = color_dict[basename] \
                    if basename in color_dict.keys() \
                    else color_dict['default']
        elif args.color_from_streamlines:
            color = subsample(tractogram_gen.data_per_streamline[
                args.color_from_streamlines])
        elif args.color_from_points:
            color = subsample(
                tractogram_gen.data_per_point[args.color_from_points])
        elif args.uniform_coloring:  # Assign uniform coloring to streamlines
            color = tuple(map(int, args.uniform_coloring))
        elif args.local_coloring:  # Compute coloring from local orientations
            # Compute segment orientation
            diff = [np.diff(list(s), axis=0) for s in streamlines]
            # Repeat first segment so that the number of segments matches
            # the number of points
            diff = [[d[0]] + list(d) for d in diff]
            # Flatten the list of segments
            orientations = np.asarray([o for d in diff for o in d])
            # Turn the segments into colors
            color = colormap.orient2rgb(orientations)
        else:  # Streamline color will depend on the streamlines' endpoints.
            color = None
        # TODO: Coloring from a volume of local orientations

        line_actor = streamline_actor[args.shape](streamlines,
                                                  colors=color,
                                                  linewidth=args.width)
        scene.add(line_actor)

    # If there's actually streamlines to display
    if len(bundle_filenames):
        # Showtime !
        showm = window.ShowManager(scene, reset_camera=True)
        showm.initialize()
        showm.start()
def get_data(in_fn, out_fn, mean, sdev):
    # Load TOM volume
    tom = nib.load(in_fn).get_data()  # 144 x 144 x 144 x 3

    # Preprocess input
    tom = (tom - mean) / sdev  # normalise based on dataset mean/stdev
    """
    do_flip_X = False if random.randint(0,1) == 0 else True
    do_flip_Y = False if random.randint(0,1) == 0 else True
    do_flip_Z = False if random.randint(0,1) == 0 else True
    if do_flip_X:
        tom = tom[::-1,:,:]
    if do_flip_Y:
        tom = tom[:,::-1,:]
    if do_flip_Z:
        tom = tom[:,:,::-1]
    """
    tom = torch.from_numpy(np.float32(tom))
    tom = tom.permute(3, 0, 1, 2)  # channels first for pytorch

    # Load the tractogram
    streamlines, header = trackvis.read(out_fn)
    streamlines = [s[0] for s in streamlines]

    # Preprocess the streamlines
    streamlines = set_number_of_points(streamlines, num_points)
    streamlines = np.array(streamlines)
    if len(streamlines) < num_streamlines:
        temp_streamlines = np.zeros((num_streamlines, num_points, 3))
        temp_streamlines[:streamlines.shape[0], :streamlines.
                         shape[1], :streamlines.shape[2]] = streamlines
        streamlines = np.float32(temp_streamlines)

    # Convert to relative format
    seeds = [sl[0].copy() for sl in streamlines]
    for i in range(len(streamlines)):
        streamlines[i] -= seeds[i]

    # Sort seeds and streamlines by seed points x, then y, then z of seeds
    streamlines = list(streamlines)
    streamlines = [
        x for _, x in sorted(
            zip(seeds, streamlines),
            key=lambda pair: [pair[0][0], pair[0][1], pair[0][2]])
    ]
    seeds = sorted(seeds, key=lambda k: [k[0], k[1], k[2]])

    # automatically converts list to numpy array and reshapes it
    streamlines = np.reshape(streamlines,
                             (int(num_streamlines**(1 / 2)),
                              int(num_streamlines**(1 / 2)), num_points * 3))
    tractogram = torch.from_numpy(streamlines)
    tractogram = tractogram.permute(2, 0, 1)  # channels first for pytorch

    # automatically converts list to numpy array and reshapes it
    seeds = np.reshape(
        seeds,
        (int(num_streamlines**(1 / 2)), int(num_streamlines**(1 / 2)), 3))
    seeds = torch.from_numpy(seeds)
    seeds = seeds.permute(2, 0, 1)

    return [tom, [seeds, tractogram]]
Esempio n. 34
0
def qbx_and_merge(streamlines, thresholds,
                  nb_pts=20, select_randomly=None, rng=None, verbose=False):
    """ Run QuickBundlesX and then run again on the centroids of the last layer

    Running QuickBundles again on that layer has the effect of merging some of
    the clusters that may have originally been divided because of branching.
    This function helps obtain a result of QuickBundles quality but at
    QuickBundlesX speed. The merging phase has a low cost because it is
    applied only to the centroids rather than to the entire dataset.

    Parameters
    ----------
    streamlines : Streamlines
    thresholds : sequence
        List of distance thresholds for QuickBundlesX.
    nb_pts : int
        Number of points for discretizing each streamline
    select_randomly : int
        Randomly select a specific number of streamlines. If None all the
        streamlines are used.
    rng : RandomState
        If None then RandomState is initialized internally.
    verbose : bool, optional.
        If True, log information. Default False.

    Returns
    -------
    clusters : obj
        Contains the clusters of the last layer of QuickBundlesX after merging.

    References
    ----------
    .. [Garyfallidis12] Garyfallidis E. et al., QuickBundles a method for
                        tractography simplification, Frontiers in Neuroscience,
                        vol 6, no 175, 2012.

    .. [Garyfallidis16] Garyfallidis E. et al. QuickBundlesX: Sequential
                        clustering of millions of streamlines in multiple
                        levels of detail at record execution time. Proceedings
                        of the, International Society of Magnetic Resonance
                        in Medicine (ISMRM). Singapore, 4187, 2016.
    """
    t = time()
    len_s = len(streamlines)
    if select_randomly is None:
        select_randomly = len_s

    if rng is None:
        rng = np.random.RandomState()
    indices = rng.choice(len_s, min(select_randomly, len_s),
                         replace=False)
    sample_streamlines = set_number_of_points(streamlines, nb_pts)

    if verbose:
        logger.info(' Resampled to {} points'.format(nb_pts))
        logger.info(' Size is %0.3f MB' % (nbytes(sample_streamlines),))
        logger.info(' Duration of resampling is %0.3f sec.' % (time() - t,))
        logger.info(' QBX phase starting...')

    qbx = QuickBundlesX(thresholds,
                        metric=AveragePointwiseEuclideanMetric())

    t1 = time()
    qbx_clusters = qbx.cluster(sample_streamlines, ordering=indices)

    if verbose:
        logger.info(' Merging phase starting ...')

    qbx_merge = QuickBundlesX([thresholds[-1]],
                              metric=AveragePointwiseEuclideanMetric())

    final_level = len(thresholds)
    len_qbx_fl = len(qbx_clusters.get_clusters(final_level))
    qbx_ordering_final = rng.choice(len_qbx_fl, len_qbx_fl, replace=False)

    qbx_merged_cluster_map = qbx_merge.cluster(
        qbx_clusters.get_clusters(final_level).centroids,
        ordering=qbx_ordering_final).get_clusters(1)

    qbx_cluster_map = qbx_clusters.get_clusters(final_level)

    merged_cluster_map = ClusterMapCentroid()
    for cluster in qbx_merged_cluster_map:
        merged_cluster = ClusterCentroid(centroid=cluster.centroid)
        for i in cluster.indices:
            merged_cluster.indices.extend(qbx_cluster_map[i].indices)
        merged_cluster_map.add_cluster(merged_cluster)

    merged_cluster_map.refdata = streamlines

    if verbose:
        logger.info(' QuickBundlesX time for %d random streamlines'
                    % (select_randomly,))

        logger.info(' Duration %0.3f sec. \n' % (time() - t1,))

    return merged_cluster_map
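
# --- Usage sketch (added for illustration; not part of the original code) ---
# Runs qbx_and_merge() on DIPY's fornix sample bundle. The imports below
# repeat, at module level, the names the function relies on (assuming the
# usual DIPY import paths).
from time import time
import numpy as np
from dipy.data import get_fnames
from dipy.io.streamline import load_tractogram
from dipy.segment.clustering import (ClusterCentroid, ClusterMapCentroid,
                                     QuickBundlesX)
from dipy.segment.metric import AveragePointwiseEuclideanMetric
from dipy.tracking.streamline import set_number_of_points

fornix_demo = load_tractogram(get_fnames('fornix'), 'same',
                              bbox_valid_check=False).streamlines
clusters_demo = qbx_and_merge(fornix_demo, thresholds=[30, 25, 20, 15],
                              nb_pts=20, rng=np.random.RandomState(42))
print('merged clusters:', len(clusters_demo))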
                    grouping = extract_grouping(grouping_xlsxpath, index_to_struct, None, verbose=verbose)
                else:
                    if allow_preprun:
                        M, grouping = connectivity_matrix_func(trkdata.streamlines, function_processes, labelmask,
                                                               symmetric=True, mapping_as_streamlines=False,
                                                               affine_streams=trkdata.space_attributes[0],
                                                               inclusive=inclusive)
                        M_grouping_excel_save(M, grouping, M_xlsxpath, grouping_xlsxpath, index_to_struct,
                                              verbose=False)
                    else:
                        print(f'skipping subject {subject} for now as grouping file is not calculated. Best rerun it afterwards ^^')
                        continue

                target_streamlines_list = grouping[target_tuple[0], target_tuple[1]]
                target_streamlines = trkdata.streamlines[target_streamlines_list]
                target_streamlines_set = set_number_of_points(target_streamlines, nb_points=num_points2)
                #del(target_streamlines, trkdata)
                target_qb = QuickBundles(threshold=distance1, metric=metric1)

                for ref in references:
                    ref_img_path = get_diff_ref(ref_MDT_folder, subject, ref)
                    ref_data, ref_affine = load_nifti(ref_img_path)

                    from dipy.tracking._utils import (_mapping_to_voxel, _to_voxel_coordinates)
                    from collections import defaultdict, OrderedDict
                    from itertools import combinations, groupby

                    edges = np.ndarray(shape=(3, 0), dtype=int)
                    lin_T, offset = _mapping_to_voxel(trkdata.space_attributes[0])
                    stream_ref = []
                    stream_point_ref = []
Esempio n. 36
0
def afq_profile(data, bundle, affine, n_points=100,
                orient_by=None, weights=None, **weights_kwarg):
    """
    Calculates a summarized profile of data for a bundle or tract
    along its length.

    Follows the approach outlined in [Yeatman2012]_.

    Parameters
    ----------
    data : 3D volume
        The statistic to sample with the streamlines.

    bundle : StreamLines class instance
        The collection of streamlines (possibly already resampled into an
        array so that each has the same length) with which we are resampling.
        See the Note below about orienting the streamlines.
    affine : array_like (4, 4)
        The mapping from voxel coordinates to streamline points.
        The voxel_to_rasmm matrix, typically from a NIFTI file.
    n_points: int, optional
        The number of points to sample along the bundle. Default: 100.
    orient_by: streamline, optional.
        A streamline to use as a standard to orient all of the streamlines in
        the bundle according to.
    weights : 1D array or 2D array or callable (optional)
        Weight each streamline (1D) or each node (2D) when calculating the
        tract-profiles. Must sum to 1 across streamlines (in each node if
        relevant). If callable, this is a function that calculates weights.
    weights_kwarg : key-word arguments
        Additional key-word arguments to pass to the weight-calculating
        function. Only to be used if weights is a callable.

    Returns
    -------
    ndarray : a 1D array with the profile of `data` along the length of
        `bundle`

    Notes
    -----
    Before providing a bundle as input to this function, you will need to make
    sure that the streamlines in the bundle are all oriented in the same
    orientation relative to the bundle (use :func:`orient_by_streamline`).

    References
    ----------
    .. [Yeatman2012] Yeatman, Jason D., Robert F. Dougherty,
       Nathaniel J. Myall, Brian A. Wandell, and Heidi M. Feldman. 2012.
       "Tract Profiles of White Matter Properties: Automating Fiber-Tract
       Quantification" PloS One 7 (11): e49790.

    """
    if orient_by is not None:
        bundle = orient_by_streamline(bundle, orient_by)
    if affine is None:
        affine = np.eye(4)
    if len(bundle) == 0:
        raise ValueError("The bundle contains no streamlines")

    # Resample each streamline to the same number of points:
    fgarray = set_number_of_points(bundle, n_points)

    # Extract the values
    values = np.array(values_from_volume(data, fgarray, affine))

    if weights is None:
        weights = np.ones(values.shape) / values.shape[0]
    elif callable(weights):
        weights = weights(bundle, **weights_kwarg)
    else:
        # We check that weights *always sum to 1 across streamlines*:
        if not np.allclose(np.sum(weights, 0), np.ones(n_points)):
            raise ValueError("The sum of weights across streamlines must ",
                             "be equal to 1")

    return np.sum(weights * values, 0)
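
# --- Usage sketch (added for illustration; not part of the original code) ---
# The scalar volume and toy bundle below are placeholders; in practice `data`
# would be e.g. an FA map living in the same space as the streamlines. The
# imports repeat the helpers afq_profile() relies on.
import numpy as np
from dipy.tracking.streamline import (Streamlines, set_number_of_points,
                                      values_from_volume)

fa_demo = np.random.rand(50, 50, 50)                  # stand-in scalar map
bundle_demo = Streamlines([np.array([[10., 10., 10.],
                                     [20., 20., 20.],
                                     [30., 30., 30.]])])
profile_demo = afq_profile(fa_demo, bundle_demo, affine=np.eye(4))
print(profile_demo.shape)                             # (100,)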
Esempio n. 37
0
def score_from_files(filename, masks_dir, bundles_dir,
                     tracts_attribs, basic_bundles_attribs,
                     save_segmented=False, save_IBs=False,
                     save_VBs=False, save_VCWPs=False,
                     segmented_out_dir='', segmented_base_name='',
                     verbose=False):
    """
    Computes all metrics in order to score a tractogram.

    Given a ``tck`` file of streamlines and a folder containing masks,
    compute the percent of: Valid Connections (VC), Invalid Connections (IC),
    Valid Connections but Wrong Path (VCWP), No Connections (NC),
    Average Bundle Coverage (ABC), Average ROIs Coverage (ARC),
    coverage per bundle and coverage per ROI. It also provides the number of
    Valid Bundles (VB), Invalid Bundles (IB) and streamlines per bundle.


    Parameters
    ------------
    filename : str
       name of a tracts file
    masks_dir : str
       name of the directory containing the masks
    save_segmented : bool
        if true, saves the segmented VC, IC, VCWP and NC

    Returns
    ---------
    scores : dict
        dictionary containing a score for each metric
    indices : dict
        dictionary containing the indices of streamlines composing VC, IC,
        VCWP and NC

    """
    if verbose:
        logging.basicConfig(level=logging.DEBUG)

    rois_dir = masks_dir + "rois/"
    bundles_masks_dir = masks_dir + "bundles/"
    wm_file = masks_dir + "wm.nii.gz"
    wm = nib.load(wm_file)

    streamlines = load_streamlines(filename, wm_file, tracts_attribs)

    ROIs = [nib.load(rois_dir + f) for f in sorted(os.listdir(rois_dir))]
    bundles_masks = [nib.load(bundles_masks_dir + f) for f in sorted(os.listdir(bundles_masks_dir))]
    ref_bundles = []

    # Ref bundles will contain {'name': 'name_of_the_bundle', 'threshold': thres_value,
    #                           'streamlines': list_of_streamlines}
    dummy_attribs = {'orientation': 'LPS'}
    qb = QuickBundles(threshold=REF_BUNDLES_THRESHOLD, metric=AveragePointwiseEuclideanMetric())

    out_centroids_dir = os.path.join(segmented_out_dir, os.path.pardir, "centroids")
    if not os.path.isdir(out_centroids_dir):
        os.mkdir(out_centroids_dir)

    rng = np.random.RandomState(42)

    for bundle_idx, bundle_f in enumerate(sorted(os.listdir(bundles_dir))):
        bundle_attribs = basic_bundles_attribs.get(os.path.basename(bundle_f))
        if bundle_attribs is None:
            raise ValueError("Missing basic bundle attribs for {0}".format(bundle_f))

        # # Already resample to avoid doing it for each iteration of chunking
        # orig_strl = [s for s in get_tracts_voxel_space_for_dipy(
        #                         os.path.join(bundles_dir, bundle_f),
        #                         wm_file, dummy_attribs)]
        orig_strl = load_streamlines(os.path.join(bundles_dir, bundle_f), wm_file, dummy_attribs)
        resamp_bundle = set_number_of_points(orig_strl, NB_POINTS_RESAMPLE)
        # resamp_bundle = [s.astype('f4') for s in resamp_bundle]

        indices = np.arange(len(resamp_bundle))
        rng.shuffle(indices)
        bundle_cluster_map = qb.cluster(resamp_bundle, ordering=indices)

        # bundle_cluster_map.refdata = resamp_bundle

        bundle_mask_inv = nib.Nifti1Image((1 - bundles_masks[bundle_idx].get_data()) * wm.get_data(),
                                          bundles_masks[bundle_idx].get_affine())

        ref_bundles.append({'name': os.path.basename(bundle_f).replace('.fib', '').replace('.tck', ''),
                            'threshold': bundle_attribs['cluster_threshold'],
                            'cluster_map': bundle_cluster_map,
                            'mask': bundles_masks[bundle_idx],
                            'mask_inv': bundle_mask_inv})

        logging.debug("{}: {} centroids".format(ref_bundles[-1]['name'], len(bundle_cluster_map)))
        nib.streamlines.save(nib.streamlines.Tractogram(bundle_cluster_map.centroids, affine_to_rasmm=np.eye(4)),
                             os.path.join(out_centroids_dir, ref_bundles[-1]['name'] + ".tck"))

    score_func = score_auto_extract_auto_IBs

    return score_func(streamlines, bundles_masks, ref_bundles, ROIs, wm,
                      save_segmented=save_segmented, save_IBs=save_IBs,
                      save_VBs=save_VBs, save_VCWPs=save_VCWPs,
                      out_segmented_strl_dir=segmented_out_dir,
                      base_out_segmented_strl=segmented_base_name,
                      ref_anat_fname=wm_file)
Esempio n. 38
0
def fornix_streamlines(no_pts=12):
    fname = get_data('fornix')
    streams, hdr = tv.read(fname)
    streamlines = [set_number_of_points(i[0], no_pts) for i in streams]
    return streamlines
Esempio n. 39
0
def test_set_number_of_points():
    # Test resampling of only one streamline
    nb_points = 12
    modified_streamline_cython = set_number_of_points(
        streamline, nb_points)
    modified_streamline_python = set_number_of_points_python(
        streamline, nb_points)
    assert_equal(len(modified_streamline_cython), nb_points)
    # Using a 5-digit precision because the streamline is in float32.
    assert_array_almost_equal(modified_streamline_cython,
                              modified_streamline_python, 5)

    modified_streamline_cython = set_number_of_points(
        streamline_64bit, nb_points)
    modified_streamline_python = set_number_of_points_python(
        streamline_64bit, nb_points)
    assert_equal(len(modified_streamline_cython), nb_points)
    assert_array_almost_equal(modified_streamline_cython,
                              modified_streamline_python)

    res = []
    simple_streamline = np.array([[0, 0, 0], [1, 1, 1], [2, 2, 2]], 'f4')
    for nb_points in range(2, 200):
        modified_streamline_cython = set_number_of_points(
            simple_streamline, nb_points)
        res.append(nb_points - len(modified_streamline_cython))
    assert_equal(np.sum(res), 0)

    # Test resampling of multiple streamlines of different nb_points
    nb_points = 12
    modified_streamlines_cython = set_number_of_points(
        streamlines, nb_points)

    for i, s in enumerate(streamlines):
        modified_streamline_python = set_number_of_points_python(s, nb_points)
        # Using a 5-digit precision because the streamline is in float32.
        assert_array_almost_equal(modified_streamlines_cython[i],
                                  modified_streamline_python, 5)

    modified_streamlines_cython = set_number_of_points(
        streamlines_64bit, nb_points)

    for i, s in enumerate(streamlines_64bit):
        modified_streamline_python = set_number_of_points_python(s, nb_points)
        assert_array_almost_equal(modified_streamlines_cython[i],
                                  modified_streamline_python)

    # Test streamlines with mixed dtype
    streamlines_mixed_dtype = [streamline,
                               streamline.astype(np.float64),
                               streamline.astype(np.int32),
                               streamline.astype(np.int64)]
    nb_points_mixed_dtype = [len(s) for s in set_number_of_points(
        streamlines_mixed_dtype, nb_points)]
    assert_array_equal(nb_points_mixed_dtype,
                       [nb_points] * len(streamlines_mixed_dtype))

    # Test streamlines with different shape
    modified_streamlines_cython = set_number_of_points(
        heterogeneous_streamlines, nb_points)

    for i, s in enumerate(heterogeneous_streamlines):
        modified_streamline_python = set_number_of_points_python(s, nb_points)
        assert_array_almost_equal(modified_streamlines_cython[i],
                                  modified_streamline_python)

    # Test streamline with integer dtype
    modified_streamline = set_number_of_points(streamline.astype(np.int32))
    assert_true(modified_streamline.dtype == np.float32)
    modified_streamline = set_number_of_points(streamline.astype(np.int64))
    assert_true(modified_streamline.dtype == np.float64)

    # Test empty list
    assert_equal(set_number_of_points([]), [])

    # Test streamline having only one point
    assert_raises(ValueError, set_number_of_points, np.array([[1, 2, 3]]))

    # We do not support list of lists, it should be numpy ndarray.
    streamline_unsupported = [[1, 2, 3], [4, 5, 5], [2, 1, 3], [4, 2, 1]]
    assert_raises(AttributeError, set_number_of_points, streamline_unsupported)

    # Test setting the number of points of a numpy array with flag
    # WRITABLE=False
    streamline_readonly = streamline.copy()
    streamline_readonly.setflags(write=False)
    assert_equal(len(set_number_of_points(streamline_readonly, nb_points=42)),
                 42)

    # Test setting the number of points on a list of numpy arrays with
    # flag WRITABLE=False
    streamlines_readonly = []
    for s in streamlines:
        streamlines_readonly.append(s.copy())
        streamlines_readonly[-1].setflags(write=False)

    assert_equal(len(set_number_of_points(streamlines_readonly, nb_points=42)),
                 len(streamlines_readonly))

    streamlines_readonly = []
    for s in streamlines_64bit:
        streamlines_readonly.append(s.copy())
        streamlines_readonly[-1].setflags(write=False)

    assert_equal(len(set_number_of_points(streamlines_readonly, nb_points=42)),
                 len(streamlines_readonly))

    # Test if nb_points is less than 2
    assert_raises(ValueError, set_number_of_points, [np.ones((10, 3)),
                  np.ones((10, 3))], nb_points=1)
Esempio n. 40
0
 def extract(self, streamline):
     return set_number_of_points(streamline, self.nb_points)
def resample(streamlines, no_of_points):
    """Resample streamlines using 12 points and also flatten the streamlines
    """
    return np.array([set_number_of_points(s, no_of_points).ravel() for s in streamlines]) 
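
# --- Usage sketch (added for illustration; not part of the original code) ---
# Each streamline becomes a single flat row of length no_of_points * 3.
import numpy as np
from dipy.tracking.streamline import set_number_of_points

toy_streamlines = [np.array([[0., 0., 0.], [1., 0., 0.], [2., 0., 0.]], 'f4'),
                   np.array([[0., 0., 0.], [0., 3., 0.]], 'f4')]
print(resample(toy_streamlines, 12).shape)   # -> (2, 36)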
Esempio n. 42
0
def slr_with_qbx(static,
                 moving,
                 x0='affine',
                 rm_small_clusters=50,
                 maxiter=100,
                 select_random=None,
                 verbose=False,
                 greater_than=50,
                 less_than=250,
                 qbx_thr=[40, 30, 20, 15],
                 nb_pts=20,
                 progressive=True,
                 rng=None,
                 num_threads=None):
    """ Utility function for registering large tractograms.

    For efficiency, we apply the registration on cluster centroids and remove
    small clusters.

    Parameters
    ----------
    static : Streamlines
    moving : Streamlines

    x0 : str, optional.
        rigid, similarity or affine transformation model (default affine)

    rm_small_clusters : int, optional
        Remove clusters that have fewer than `rm_small_clusters` streamlines
        (default 50)

    select_random : int, optional.
        If not None, select this many streamlines at random to apply
        clustering on. Default None.

    verbose : bool, optional
        If True, logs information about optimization. Default: False

    greater_than : int, optional
            Keep streamlines that have length greater than
            this value (default 50)

    less_than : int, optional
            Keep streamlines that have length less than this value
            (default 250)

    qbx_thr : variable int
            Thresholds for QuickBundlesX (default [40, 30, 20, 15])

    nb_pts : int, optional
            Number of points for discretizing each streamline (default 20)

    progressive : boolean, optional
            If True, apply a progressive registration (simpler transforms
            first, then the full model requested by `x0`) instead of a
            single optimization (default True)

    rng : RandomState
        If None creates RandomState in function.

    num_threads : int, optional
        Number of threads to be used for OpenMP parallelization. If None
        (default) the value of OMP_NUM_THREADS environment variable is used
        if it is set, otherwise all available threads are used. If < 0 the
        maximal number of threads minus |num_threads + 1| is used (enter -1 to
        use as many threads as possible). 0 raises an error. Only metrics
        using OpenMP will use this variable.

    Notes
    -----
    The order of operations is the following. First, short or long streamlines
    are removed. Second, the tractogram (or a random selection of it) is
    clustered with QuickBundlesX. Then SLR [Garyfallidis15]_ is applied on the
    cluster centroids.

    References
    ----------
    .. [Garyfallidis15] Garyfallidis et al. "Robust and efficient linear
    registration of white-matter fascicles in the space of streamlines",
    NeuroImage, 117, 124--140, 2015
    .. [Garyfallidis14] Garyfallidis et al., "Direct native-space fiber
            bundle alignment for group comparisons", ISMRM, 2014.
    .. [Garyfallidis17] Garyfallidis et al. Recognition of white matter
    bundles using local and global streamline-based registration and
    clustering, Neuroimage, 2017.

    """
    if rng is None:
        rng = np.random.RandomState()

    if verbose:
        logger.info('Static streamlines size {}'.format(len(static)))
        logger.info('Moving streamlines size {}'.format(len(moving)))

    def check_range(streamline, gt=greater_than, lt=less_than):

        if (length(streamline) > gt) & (length(streamline) < lt):
            return True
        else:
            return False

    streamlines1 = Streamlines(static[np.array(
        [check_range(s) for s in static])])
    streamlines2 = Streamlines(moving[np.array(
        [check_range(s) for s in moving])])
    if verbose:
        logger.info('Static streamlines after length reduction {}'.format(
            len(streamlines1)))
        logger.info('Moving streamlines after length reduction {}'.format(
            len(streamlines2)))

    if select_random is not None:
        rstreamlines1 = select_random_set_of_streamlines(streamlines1,
                                                         select_random,
                                                         rng=rng)
    else:
        rstreamlines1 = streamlines1

    rstreamlines1 = set_number_of_points(rstreamlines1, nb_pts)

    rstreamlines1._data = rstreamlines1._data.astype('f4')

    cluster_map1 = qbx_and_merge(rstreamlines1, thresholds=qbx_thr, rng=rng)
    qb_centroids1 = remove_clusters_by_size(cluster_map1, rm_small_clusters)

    if select_random is not None:
        rstreamlines2 = select_random_set_of_streamlines(streamlines2,
                                                         select_random,
                                                         rng=rng)
    else:
        rstreamlines2 = streamlines2

    rstreamlines2 = set_number_of_points(rstreamlines2, nb_pts)
    rstreamlines2._data = rstreamlines2._data.astype('f4')

    cluster_map2 = qbx_and_merge(rstreamlines2, thresholds=qbx_thr, rng=rng)

    qb_centroids2 = remove_clusters_by_size(cluster_map2, rm_small_clusters)

    if verbose:
        t = time()

    if not progressive:
        slr = StreamlineLinearRegistration(x0=x0,
                                           options={'maxiter': maxiter},
                                           num_threads=num_threads)
        slm = slr.optimize(qb_centroids1, qb_centroids2)
    else:
        bounds = DEFAULT_BOUNDS

        slm = progressive_slr(qb_centroids1,
                              qb_centroids2,
                              x0=x0,
                              metric=None,
                              bounds=bounds,
                              num_threads=num_threads)

    if verbose:
        logger.info('QB static centroids size %d' % len(qb_centroids1, ))
        logger.info('QB moving centroids size %d' % len(qb_centroids2, ))
        duration = time() - t
        logger.info('SLR finished in  %0.3f seconds.' % (duration, ))
        if slm.iterations is not None:
            logger.info('SLR iterations: %d ' % (slm.iterations, ))

    moved = slm.transform(moving)

    return moved, slm.matrix, qb_centroids1, qb_centroids2
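
# --- Usage sketch (added for illustration; not part of the original code) ---
# The function above mirrors what recent DIPY releases ship as
# dipy.align.streamlinear.slr_with_qbx, so the sketch calls that public
# version on the two cingulum sample bundles; the parameter values are just
# illustrative choices for this small dataset.
import numpy as np
from dipy.data import two_cingulum_bundles
from dipy.tracking.streamline import Streamlines
from dipy.align.streamlinear import slr_with_qbx

cb_static, cb_moving = map(Streamlines, two_cingulum_bundles())
moved_demo, affine_demo, cents_static, cents_moving = slr_with_qbx(
    cb_static, cb_moving, x0='affine', rm_small_clusters=2,
    greater_than=10, less_than=300, rng=np.random.RandomState(42))
print(affine_demo)   # 4x4 transform taking the moving bundle onto the static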
Esempio n. 43
0
def test_LSCv2():
    xyz1 = np.array([[1, 0, 0], [2, 0, 0], [3, 0, 0]], dtype='float32')
    xyz2 = np.array([[1, 0, 0], [1, 2, 0], [1, 3, 0]], dtype='float32')
    xyz3 = np.array([[1.1, 0, 0], [1, 2, 0], [1, 3, 0]], dtype='float32')
    xyz4 = np.array([[1, 0, 0], [2.1, 0, 0], [3, 0, 0]], dtype='float32')

    xyz5 = np.array([[100, 0, 0], [200, 0, 0], [300, 0, 0]], dtype='float32')
    xyz6 = np.array([[0, 20, 0], [0, 40, 0], [300, 50, 0]], dtype='float32')

    T = [xyz1, xyz2, xyz3, xyz4, xyz5, xyz6]
    pf.local_skeleton_clustering(T, 0.2)

    # print C
    # print len(C)

    pf.local_skeleton_clustering_3pts(T, 0.2)

    # print C2
    # print len(C2)

    # """

    for i in range(40):
        xyz = np.random.rand(3, 3).astype('f4')
        T.append(xyz)

    from time import time
    t1 = time()
    C3 = pf.local_skeleton_clustering(T, .5)
    t2 = time()
    print(t2 - t1)
    print(len(C3))

    t1 = time()
    C4 = pf.local_skeleton_clustering_3pts(T, .5)
    t2 = time()
    print(t2 - t1)
    print(len(C4))

    for c in C3:
        assert_equal(np.sum(C3[c]['hidden'] - C4[c]['hidden']), 0)

    T2 = []
    for i in range(10**4):
        xyz = np.random.rand(10, 3).astype('f4')
        T2.append(xyz)
    t1 = time()
    C5 = pf.local_skeleton_clustering(T2, .5)
    t2 = time()
    print(t2 - t1)
    print(len(C5))

    fname = get_fnames('fornix')
    fornix = load_tractogram(fname, 'same', bbox_valid_check=False).streamlines

    T3 = set_number_of_points(fornix, 6)

    print('lenT3', len(T3))

    C = pf.local_skeleton_clustering(T3, 10.)

    print('lenC', len(C))
    """
Esempio n. 44
0
def subsample_streamlines(streamlines,
                          min_length=0.,
                          max_length=0.,
                          max_streamlines=0,
                          num_points=0,
                          arc_length=False,
                          rng=None):
    """
    Parameters
    ----------
    streamlines: list
        List of list of 3D points.
    min_length: float
        Minimum length of streamlines.
    max_length: float
        Maximum length of streamlines.
    max_streamlines: int
        Maximum number of streamlines to output.
    num_points: int
        Number of points per streamline in the output.
    arc_length: bool
        Whether to downsample using arc length parametrization.
    rng: RandomState object
        Random number generator to use for shuffling the data.
        By default, a constant seed is used.

    Returns
    -------
    results: list
        List of subsampled streamlines.
    """

    if rng is None:
        rng = np.random.RandomState(1234)

    num_streamlines = len(streamlines)
    if max_streamlines <= 0:
        max_streamlines = num_streamlines

    lengths = np.zeros(num_streamlines)
    for i in np.arange(num_streamlines):
        lengths[i] = dipy.tracking.metrics.length(streamlines[i])

    ind = list(range(num_streamlines))
    rng.shuffle(ind)
    results = []

    while len(ind) > 0 and len(results) < max_streamlines:
        i = ind.pop()
        if (lengths[i] >= min_length
                and (max_length <= 0. or lengths[i] <= max_length)):
            if num_points:
                if arc_length:
                    line = set_number_of_points(streamlines[i], num_points)
                else:
                    line = downsample(streamlines[i], num_points)
                results.append(line)
            else:
                results.append(streamlines[i])

    return results
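
# --- Usage sketch (added for illustration; not part of the original code) ---
# Keeps at most 2 of the 3 toy streamlines below, resampling each kept one to
# 12 points via arc-length parametrization (i.e. set_number_of_points). The
# imports repeat what the function above relies on.
import numpy as np
import dipy.tracking.metrics                     # provides .length used above
from dipy.tracking.streamline import set_number_of_points

toy_streamlines = [np.cumsum(np.random.rand(n, 3), axis=0).astype('f4')
                   for n in (15, 40, 80)]
kept = subsample_streamlines(toy_streamlines, min_length=1., max_length=200.,
                             max_streamlines=2, num_points=12,
                             arc_length=True)
print(len(kept), [len(s) for s in kept])         # -> 2 [12, 12]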
Esempio n. 45
0
    def _prepare_batch(self, indices):
        orig_streamlines, volume_ids = self.dataset[indices]
        streamlines = self._add_noise_to_streamlines(orig_streamlines.copy())

        streamlines._lengths = streamlines._lengths.astype("int64")
        if self.resample_streamlines:
            # streamline_length = np.max(streamlines._lengths)  # Sequences are resampled so that they have the same length.
            streamline_length = np.min(
                streamlines._lengths
            )  # Sequences are resampled so that they have the same length.
            streamlines = set_number_of_points(streamlines,
                                               nb_points=streamline_length)

        inputs = streamlines._data  # Streamlines coordinates
        targets = streamlines._data[
            1:] - streamlines._data[:-1]  # Unnormalized directions
        if self.normalize_target:
            targets = targets / np.sqrt(
                np.sum(targets**2, axis=1,
                       keepdims=True))  # Normalized directions

        batch_size = len(streamlines)
        if self.use_augment_by_flipping:
            batch_size *= 2

        max_streamline_length = np.max(
            streamlines._lengths
        )  # Sequences are padded so that they have the same length.
        batch_masks = np.zeros((batch_size, max_streamline_length - self.k),
                               dtype=floatX)
        batch_inputs = np.zeros(
            (batch_size, max_streamline_length - self.k, inputs.shape[1]),
            dtype=floatX)
        batch_targets = np.zeros(
            (batch_size, max_streamline_length - 1, self.target_size),
            dtype=floatX)

        for i, (offset, length) in enumerate(
                zip(streamlines._offsets, streamlines._lengths)):
            n = length - self.k
            batch_masks[i, :n] = 1
            batch_inputs[i, :n] = inputs[offset:offset + n]
            batch_targets[i, :length - 1] = targets[offset:offset + length - 1]

            if self.use_augment_by_flipping:
                batch_masks[i + len(streamlines), :n] = 1
                batch_inputs[i + len(streamlines), :n] = inputs[offset +
                                                                self.k:offset +
                                                                length][::-1]
                batch_targets[i + len(streamlines), :length -
                              1] = -targets[offset:offset + length - 1][::-1]

        batch_volume_ids = np.tile(volume_ids[:, None, None],
                                   (1 + self.use_augment_by_flipping,
                                    max_streamline_length - self.k, 1))
        batch_inputs = np.concatenate([batch_inputs, batch_volume_ids],
                                      axis=2)  # Streamlines coords + dwi ID

        if self.feed_previous_direction:
            previous_directions = np.concatenate([
                np.zeros((batch_size, 1, 3), dtype=floatX),
                batch_targets[:, :-self.k]
            ],
                                                 axis=1)
            previous_directions = previous_directions / np.sqrt(
                np.sum(previous_directions**2, axis=2, keepdims=True) +
                1e-6)  # Normalized directions
            batch_inputs = np.concatenate(
                [batch_inputs, previous_directions],
                axis=2)  # Streamlines coords + dwi ID + previous direction

        return batch_inputs, batch_targets, batch_masks
Esempio n. 46
0
def auto_extract_VCs(streamlines, ref_bundles):
    # Streamlines = list of all streamlines

    VC = 0
    VC_idx = set()

    found_vbs_info = {}
    for bundle in ref_bundles:
        found_vbs_info[bundle['name']] = {
            'nb_streamlines': 0,
            'streamlines_indices': set()
        }

    # Need to bookkeep because we chunk for big datasets
    processed_strl_count = 0
    chunk_size = 5000
    chunk_it = 0

    nb_bundles = len(ref_bundles)
    bundles_found = [False] * nb_bundles

    logging.debug("Starting scoring VCs")

    qb = QuickBundles(threshold=20, metric=AveragePointwiseEuclideanMetric())

    # Start loop here for big datasets
    while processed_strl_count < len(streamlines):
        logging.debug("Starting chunk: {0}".format(chunk_it))

        strl_chunk = streamlines[chunk_it * chunk_size:(chunk_it + 1) *
                                 chunk_size]

        processed_strl_count += len(strl_chunk)
        cur_chunk_VC_idx, cur_chunk_IC_idx, cur_chunk_VCWP_idx = \
            set(), set(), set()

        # Already resample and run quickbundles on the submission chunk,
        # to avoid doing it at every call of auto_extract
        rstreamlines = set_number_of_points(strl_chunk, NB_POINTS_RESAMPLE)

        # qb.cluster had problem with f8
        rstreamlines = [s.astype('f4') for s in rstreamlines]

        chunk_cluster_map = qb.cluster(rstreamlines)
        chunk_cluster_map.refdata = strl_chunk

        logging.debug("Starting VC identification through auto_extract")

        for bundle_idx, ref_bundle in enumerate(ref_bundles):
            # The selected indices are from [0, len(strl_chunk)]
            selected_streamlines_indices = auto_extract(
                ref_bundle['cluster_map'],
                chunk_cluster_map,
                clean_thr=ref_bundle['threshold'])

            # Remove duplicates, when streamlines are assigned to multiple VBs.
            selected_streamlines_indices = set(selected_streamlines_indices) - \
                                           cur_chunk_VC_idx
            cur_chunk_VC_idx |= selected_streamlines_indices

            nb_selected_streamlines = len(selected_streamlines_indices)

            if nb_selected_streamlines:
                bundles_found[bundle_idx] = True
                VC += nb_selected_streamlines

                # Shift indices to match the real number of streamlines
                global_select_strl_indices = set([
                    v + chunk_it * chunk_size
                    for v in selected_streamlines_indices
                ])
                vb_info = found_vbs_info.get(ref_bundle['name'])
                vb_info['nb_streamlines'] += nb_selected_streamlines
                vb_info['streamlines_indices'] |= global_select_strl_indices

                VC_idx |= global_select_strl_indices
            else:
                global_select_strl_indices = set()

        chunk_it += 1

    # Compute bundle overlap, overreach and f1_scores and update found_vbs_info
    for bundle_idx, ref_bundle in enumerate(ref_bundles):
        bundle_name = ref_bundle["name"]
        bundle_mask = ref_bundle["mask"]

        vb_info = found_vbs_info[bundle_name]

        # Streamlines are in voxel space since that's how they were
        # loaded in the scoring function.
        tractogram = Tractogram(
            streamlines=(streamlines[i]
                         for i in vb_info['streamlines_indices']),
            affine_to_rasmm=bundle_mask.affine)

        scores = {}
        if len(tractogram) > 0:
            scores = compute_bundle_coverage_scores(tractogram, bundle_mask)

        vb_info['overlap'] = scores.get("OL", 0)
        vb_info['overreach'] = scores.get("OR", 0)
        vb_info['overreach_norm'] = scores.get("ORn", 0)
        vb_info['f1_score'] = scores.get("F1", 0)

    return VC_idx, found_vbs_info, bundles_found
    from dipy.tracking.streamline import set_number_of_points
    from dipy.tracking.distances import bundles_distances_mdf
    from time import time

    trk_fn = 'sub-100206_var-FNAL_tract.trk'
    nb_points = 32
    nb_prototypes = 100
    streamline_distance = bundles_distances_mdf

    streamlines, header, lengths, idxs = load_streamlines(trk_fn,
                                                          idxs=None,
                                                          apply_affine=True,
                                                          container='list',
                                                          verbose=True)
    print("Resampling to %s points" % nb_points)
    streamlines = set_number_of_points(streamlines, nb_points)
    # streamlines = np.array(set_number_of_points(streamlines, nb_points),
    #                        dtype=np.object)

    print("embed_flattened():")
    t0 = time()
    X_flattened = embed_flattened(streamlines)
    print("%s sec." % (time() - t0))

    print("embed_flattened_plus_flipped():")
    t0 = time()
    X_flattened_plus_flipped = embed_flattened_plus_flipped(streamlines)
    print("%s sec." % (time() - t0))

    print("embed_ordered():")
    t0 = time()
Esempio n. 48
0
def main():
    parser = buildArgsParser()
    args = parser.parse_args()

    full_tfile = nib.streamlines.load(args.full_tfile)
    model_tfile = nib.streamlines.load(args.model_tfile)
    model_mask = nib.load(args.model_mask)

    # Bring streamlines to voxel space, where coordinate (0,0,0) represents
    # the corner of a voxel.
    model_tfile.tractogram.apply_affine(np.linalg.inv(model_mask.affine))
    model_tfile.streamlines._data += 0.5  # Shift of half a voxel
    full_tfile.tractogram.apply_affine(np.linalg.inv(model_mask.affine))
    full_tfile.streamlines._data += 0.5  # Shift of half a voxel

    assert (model_mask.get_data().sum() == create_binary_map(
        model_tfile.streamlines, model_mask).sum())

    # Resample streamlines
    full_streamlines = set_number_of_points(full_tfile.streamlines,
                                            args.nb_points_resampling)
    model_streamlines = set_number_of_points(model_tfile.streamlines,
                                             args.nb_points_resampling)

    # Segment model
    rng = np.random.RandomState(42)
    indices = np.arange(len(model_streamlines))
    rng.shuffle(indices)
    qb = QuickBundles(args.qb_threshold)
    clusters = qb.cluster(model_streamlines, ordering=indices)

    # Try to find optimal assignment threshold
    best_threshold = None
    best_f1_score = -np.inf
    thresholds = np.arange(-2, 10, 0.2) + args.qb_threshold
    for threshold in thresholds:
        indices = qb.find_closest(clusters,
                                  full_streamlines,
                                  threshold=threshold)
        nb_assignments = np.sum(indices != -1)

        mask = create_binary_map(full_tfile.streamlines[indices != -1],
                                 model_mask)

        overlap_per_bundle = _compute_overlap(model_mask.get_data(), mask)
        overreach_per_bundle = _compute_overreach(model_mask.get_data(), mask)
        # overreach_norm_gt_per_bundle = _compute_overreach_normalize_gt(model_mask.get_data(), mask)
        f1_score = _compute_f1_score(overlap_per_bundle, overreach_per_bundle)
        if best_f1_score < f1_score:
            best_threshold = threshold
            best_f1_score = f1_score

        print("{}:\t {}/{} ({:.1%}) {:.1%}/{:.1%} ({:.1%}) {}/{}".format(
            threshold, nb_assignments, len(model_streamlines),
            nb_assignments / len(model_streamlines), overlap_per_bundle,
            overreach_per_bundle, f1_score, mask.sum(),
            model_mask.get_data().sum()))

        if overlap_per_bundle >= 1:
            break

    print("Best threshold: {} with F1-Score of {}".format(
        best_threshold, best_f1_score))
Esempio n. 49
0
def bench_quickbundles():
    dtype = "float32"
    repeat = 10
    nb_points = 12

    fname = get_fnames('fornix')

    fornix = load_tractogram(fname, 'same', bbox_valid_check=False).streamlines

    fornix_streamlines = Streamlines(fornix)
    fornix_streamlines = set_number_of_points(fornix_streamlines, nb_points)

    # Create eight copies of the fornix to be clustered (one in each octant).
    streamlines = []
    streamlines += [
        s + np.array([100, 100, 100], dtype) for s in fornix_streamlines
    ]
    streamlines += [
        s + np.array([100, -100, 100], dtype) for s in fornix_streamlines
    ]
    streamlines += [
        s + np.array([100, 100, -100], dtype) for s in fornix_streamlines
    ]
    streamlines += [
        s + np.array([100, -100, -100], dtype) for s in fornix_streamlines
    ]
    streamlines += [
        s + np.array([-100, 100, 100], dtype) for s in fornix_streamlines
    ]
    streamlines += [
        s + np.array([-100, -100, 100], dtype) for s in fornix_streamlines
    ]
    streamlines += [
        s + np.array([-100, 100, -100], dtype) for s in fornix_streamlines
    ]
    streamlines += [
        s + np.array([-100, -100, -100], dtype) for s in fornix_streamlines
    ]

    # The expected number of clusters of the fornix using threshold=10 is 4.
    threshold = 10.
    expected_nb_clusters = 4 * 8

    print("Timing QuickBundles 1.0 vs. 2.0")

    qb2 = QB_New(threshold)
    qb2_time = measure("clusters = qb2.cluster(streamlines)", repeat)
    print("QuickBundles2 time: {0:.4}sec".format(qb2_time))
    print("Speed up of {0}x".format(qb1_time / qb2_time))
    clusters = qb2.cluster(streamlines)
    sizes2 = map(len, clusters)
    indices2 = map(lambda c: c.indices, clusters)
    assert_equal(len(clusters), expected_nb_clusters)
    assert_array_equal(list(sizes2), sizes1)
    assert_arrays_equal(indices2, indices1)

    qb = QB_New(threshold, metric=MDFpy())
    qb3_time = measure("clusters = qb.cluster(streamlines)", repeat)
    print("QuickBundles2_python time: {0:.4}sec".format(qb3_time))
    print("Speed up of {0}x".format(qb1_time / qb3_time))
    clusters = qb.cluster(streamlines)
    sizes3 = map(len, clusters)
    indices3 = map(lambda c: c.indices, clusters)
    assert_equal(len(clusters), expected_nb_clusters)
    assert_array_equal(list(sizes3), sizes1)
    assert_arrays_equal(indices3, indices1)
Esempio n. 50
0
from time import sleep
from dipy.data import two_cingulum_bundles

cb_subj1, cb_subj2 = two_cingulum_bundles()

from dipy.align.streamlinear import StreamlineLinearRegistration
from dipy.tracking.streamline import set_number_of_points
"""
An important step before running the registration is to resample the
streamlines so that they both have the same number of points per streamline.
Here we will use 20 points. This step is not optional. Inputting streamlines
with a different number of points will break the theoretical advantages of using
the SLR as explained in [Garyfallidis15]_.
"""

cb_subj1 = set_number_of_points(cb_subj1, 20)
cb_subj2 = set_number_of_points(cb_subj2, 20)
"""
Let's say now that we want to move the ``cb_subj2`` (moving) so that it can be
aligned with ``cb_subj1`` (static). Here is how this is done.
"""

srr = StreamlineLinearRegistration()

srm = srr.optimize(static=cb_subj1, moving=cb_subj2)
"""
After the optimization is finished we can apply the transformation to
``cb_subj2``.
"""

cb_subj2_aligned = srm.transform(cb_subj2)
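
"""
(Added note, not part of the original tutorial.) The estimated 4x4 matrix is
available as ``srm.matrix``; if the analysis should continue on the original,
non-resampled bundle, the same transform can be applied to it with
``transform_streamlines``.
"""

from dipy.tracking.streamline import transform_streamlines

print(srm.matrix)  # affine estimated by the SLR
cb_subj2_orig_aligned = transform_streamlines(two_cingulum_bundles()[1],
                                              srm.matrix)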
Esempio n. 52
0
track1, header1, lengths1, indices1 = load_streamlines(filename1,
                                                       container="array",
                                                       verbose=True,
                                                       idxs=N_streamlines1,
                                                       apply_affine=True)
track2, header2, lengths2, indices2 = load_streamlines(filename2,
                                                       container="array",
                                                       verbose=True,
                                                       idxs=N_streamlines2,
                                                       apply_affine=True)

# %% Resampling
print("setting the same number of points for both the tracts...")

track1 = np.array(set_number_of_points(track1, N_points))
track2 = np.array(set_number_of_points(track2, N_points))

# %% Nearest Neighbours
print("Nearest Neighbours")

distances, neighbours = streamlines_neighbors(track1, track2, k=k)

# %% Sparse Cost Matrix

print("Creation of the Sparse Cost Matrix")

cost = sparse.csr_matrix((N_streamlines1, N_streamlines2), dtype=np.float64)

tmp = np.repeat(np.arange(N_streamlines1)[:, None], k, axis=1)
Esempio n. 53
0
def _auto_extract_VCs(streamlines, ref_bundles):
    # Streamlines = list of all streamlines

    # TODO check what is needed
    # VC = 0
    VC_idx = set()

    found_vbs_info = {}
    for bundle in ref_bundles:
        found_vbs_info[bundle['name']] = {'nb_streamlines': 0,
                                          'streamlines_indices': set()}

    # TODO probably not needed
    # already_assigned_streamlines_idx = set()

    # Need to keep count because big datasets are processed in chunks
    processed_strl_count = 0
    chunk_size = len(streamlines)
    chunk_it = 0

    # nb_bundles = len(ref_bundles)
    # bundles_found = [False] * nb_bundles
    #bundles_potential_VCWP = [set()] * nb_bundles

    logging.debug("Starting scoring VCs")

    # Start loop here for big datasets
    while processed_strl_count < len(streamlines):
        if processed_strl_count > 0:
            raise NotImplementedError("Not supposed to have more than one chunk!")

        logging.debug("Starting chunk: {0}".format(chunk_it))

        strl_chunk = streamlines[chunk_it * chunk_size: (chunk_it + 1) * chunk_size]

        processed_strl_count += len(strl_chunk)

        # Already resample and run quickbundles on the submission chunk,
        # to avoid doing it at every call of auto_extract
        rstreamlines = set_number_of_points(nib.streamlines.ArraySequence(strl_chunk), NB_POINTS_RESAMPLE)

        # qb.cluster had problem with f8
        # rstreamlines = [s.astype('f4') for s in rstreamlines]

        # chunk_cluster_map = qb.cluster(rstreamlines)
        # chunk_cluster_map.refdata = strl_chunk

        # # Merge clusters
        # all_bundles = ClusterMapCentroid()
        # cluster_id_to_bundle_id = []
        # for bundle_idx, ref_bundle in enumerate(ref_bundles):
        #     clusters = ref_bundle["cluster_map"]
        #     cluster_id_to_bundle_id.extend([bundle_idx] * len(clusters))
        #     all_bundles.add_cluster(*clusters)

        # logging.debug("Starting VC identification through auto_extract")
        # qb = QuickBundles(threshold=10, metric=AveragePointwiseEuclideanMetric())
        # closest_bundles = qb.find_closest(all_bundles, rstreamlines, threshold=7)

        # print("Unassigned streamlines: {}".format(np.sum(closest_bundles == -1)))

        # for cluster_id, bundle_id in enumerate(cluster_id_to_bundle_id):
        #     indices = np.where(closest_bundles == cluster_id)[0]
        #     print("{}/{} ({}) Found {}".format(cluster_id, len(cluster_id_to_bundle_id), ref_bundles[bundle_id]['name'], len(indices)))
        #     if len(indices) == 0:
        #         continue

        #     vb_info = found_vbs_info.get(ref_bundles[bundle_id]['name'])
        #     indices = set(indices)
        #     vb_info['nb_streamlines'] += len(indices)
        #     vb_info['streamlines_indices'] |= indices
        #     VC_idx |= indices

        qb = QuickBundles(threshold=10, metric=AveragePointwiseEuclideanMetric())
        ordering = np.arange(len(rstreamlines))
        logging.debug("Starting VC identification through auto_extract")
        for bundle_idx, ref_bundle in enumerate(ref_bundles):
            print(ref_bundle['name'], ref_bundle['threshold'], len(ref_bundle['cluster_map']))
            # The selected indices are from [0, len(strl_chunk)]
            # selected_streamlines_indices = auto_extract(ref_bundle['cluster_map'],
            #                                             rstreamlines,
            #                                             clean_thr=ref_bundle['threshold'],
            #                                             ordering=ordering)

            closest_bundles = qb.find_closest(ref_bundle['cluster_map'], rstreamlines[ordering], ref_bundle['threshold'])
            selected_streamlines_indices = ordering[closest_bundles >= 0]
            ordering = ordering[closest_bundles == -1]

            # Remove duplicates, when streamlines are assigned to multiple VBs.
            # TODO better handling of this case
            # selected_streamlines_indices = set(selected_streamlines_indices) - cur_chunk_VC_idx
            # cur_chunk_VC_idx |= selected_streamlines_indices

            nb_selected_streamlines = len(selected_streamlines_indices)
            print("{} assigned".format(nb_selected_streamlines))

            if nb_selected_streamlines:
                # bundles_found[bundle_idx] = True
                # VC += nb_selected_streamlines

                # Shift indices to match the real number of streamlines
                global_select_strl_indices = set([v + chunk_it * chunk_size
                                                 for v in selected_streamlines_indices])
                vb_info = found_vbs_info.get(ref_bundle['name'])
                vb_info['nb_streamlines'] += nb_selected_streamlines
                vb_info['streamlines_indices'] |= global_select_strl_indices

                VC_idx |= global_select_strl_indices
                # already_assigned_streamlines_idx |= global_select_strl_indices

        chunk_it += 1

    return VC_idx, found_vbs_info
Esempio n. 54
0
def load_tractography_dataset_from_dwi_and_tractogram(dwi,
                                                      tractogram,
                                                      volume_manager,
                                                      use_sh_coeffs=False,
                                                      bvals=None,
                                                      bvecs=None,
                                                      step_size=None,
                                                      mean_centering=True):
    # Load signal
    signal = nib.load(dwi)
    signal.get_data()  # Forces loading volume in-memory.
    basename = re.sub(r'(\.gz|\.nii\.gz)$', '', dwi)
    bvals = basename + '.bvals' if bvals is None else bvals
    bvecs = basename + '.bvecs' if bvecs is None else bvecs

    gradients = gradient_table(bvals, bvecs)
    tracto_data = TractographyData(signal, gradients)

    # Load streamlines
    tfile = nib.streamlines.load(tractogram)
    tractogram = tfile.tractogram

    # Resample streamline to have a fixed step size, if needed.
    if step_size is not None:
        print("Resampling streamlines to have a step size of {}mm".format(
            step_size))
        streamlines = tractogram.streamlines
        streamlines._lengths = streamlines._lengths.astype(int)
        streamlines._offsets = streamlines._offsets.astype(int)
        lengths = length(streamlines)
        nb_points = np.ceil(lengths / step_size).astype(int)
        new_streamlines = (set_number_of_points(s, n)
                           for s, n in zip(streamlines, nb_points))
        tractogram = nib.streamlines.Tractogram(new_streamlines,
                                                affine_to_rasmm=np.eye(4))

    # Compute matrix that brings streamlines back to diffusion voxel space.
    rasmm2vox_affine = np.linalg.inv(signal.affine)
    tractogram.apply_affine(rasmm2vox_affine)

    # Add streamlines to the TractogramData
    tracto_data.add(tractogram.streamlines, "tractogram")

    dwi = tracto_data.signal
    bvals = tracto_data.gradients.bvals
    bvecs = tracto_data.gradients.bvecs

    if use_sh_coeffs:
        # Use 45 spherical harmonic coefficients to represent the diffusion signal.
        volume = neurotools.get_spherical_harmonics_coefficients(
            dwi, bvals, bvecs,
            mean_centering=mean_centering).astype(np.float32)
    else:
        # Resample the diffusion signal to have 100 directions.
        volume = neurotools.resample_dwi(dwi,
                                         bvals,
                                         bvecs,
                                         mean_centering=mean_centering).astype(
                                             np.float32)

    tracto_data.signal.uncache(
    )  # Free some memory as we don't need the original signal.
    subject_id = volume_manager.register(volume)
    tracto_data.subject_id = subject_id

    return TractographyDataset([tracto_data], "dataset", keep_on_cpu=True)
Esempio n. 55
0
def tractograms_slr(moving_tractogram, static_tractogram):

    table_filename = 'affine_dictionary.pickle'
    if isfile(table_filename):
        print("Retrieving past results from %s" % table_filename)
        table = pickle.load(open(table_filename, 'rb'))
    else:
        print("Creating a new table which will be saved in %s" %
              table_filename)
        table = {}

    moving_tractogram_basename = ntpath.basename(moving_tractogram)
    static_tractogram_basename = ntpath.basename(static_tractogram)
    key = (moving_tractogram_basename, static_tractogram_basename)

    if key in table:
        print("Affine already exists in %s" % table_filename)
        affine = table[key]['affine']
    else:
        print("Loading tractograms...")
        moving_tractogram = nib.streamlines.load(moving_tractogram)
        moving_tractogram = moving_tractogram.streamlines
        static_tractogram = nib.streamlines.load(static_tractogram)
        static_tractogram = static_tractogram.streamlines

        print("Set parameters as in Garyfallidis et al. 2015.")
        threshold_length = 40.0  # 50mm / 1.25
        qb_threshold = 16.0  # 20mm / 1.25
        nb_res_points = 20

        print("Performing QuickBundles of static tractogram and resampling...")
        st = np.array(
            [s for s in static_tractogram if len(s) > threshold_length],
            dtype=object)
        qb = QuickBundles(threshold=qb_threshold)
        st_clusters = [cluster.centroid for cluster in qb.cluster(st)]
        st_clusters = set_number_of_points(st_clusters, nb_res_points)

        print("Performing QuickBundles of moving tractogram and resampling...")
        mt = np.array(
            [s for s in moving_tractogram if len(s) > threshold_length],
            dtype=object)
        qb = QuickBundles(threshold=qb_threshold)
        mt_clusters = [cluster.centroid for cluster in qb.cluster(mt)]
        mt_clusters = set_number_of_points(mt_clusters, nb_res_points)

        print("Performing Linear Registration...")
        srr = StreamlineLinearRegistration()
        srm = srr.optimize(static=st_clusters, moving=mt_clusters)

        print(
            "Affine transformation matrix with Streamline Linear Registration:"
        )
        affine = srm.matrix
        print('%s' % affine)

        print("Fill the dictionary.")
        table[moving_tractogram_basename, static_tractogram_basename] = {
            'affine': affine
        }
        pickle.dump(table,
                    open(table_filename, 'wb'),
                    protocol=pickle.HIGHEST_PROTOCOL)

    return affine
Esempio n. 56
0
def compute_terminal_points_matrix(S_A, S_B):
    from dipy.tracking.streamline import set_number_of_points
    S_A_res = np.array([set_number_of_points(s, nb_points=2) for s in S_A])
    S_B_res = np.array([set_number_of_points(s, nb_points=2) for s in S_B])
    return 2.0 * bundles_distances_mdf(S_A_res, S_B_res)
Esempio n. 57
0
def bundle_analysis(model_bundle_folder, bundle_folder, orig_bundle_folder,
                    metric_folder, group, subject, no_disks=100,
                    out_dir=''):
    """
    Applies statistical analysis on bundles and saves the results
    in a directory specified by ``out_dir``.

    Parameters
    ----------
    model_bundle_folder : string
        Path to the input model bundle files. This path may contain
        wildcards to process multiple inputs at once.
    bundle_folder : string
        Path to the input bundle files in common space. This path may
        contain wildcards to process multiple inputs at once.
    orig_bundle_folder : string
        Path to the input bundle files in native space. This path may
        contain wildcards to process multiple inputs at once.
    metric_folder : string
        Path to the input dti metric or/and peak files. It will be used as
        metric for statistical analysis of bundles.
    group : string
        what group subject belongs to e.g. control or patient
    subject : string
        subject id e.g. 10001
    no_disks : integer, optional
        Number of disks used for dividing bundle into disks. (Default 100)
    out_dir : string, optional
        Output directory (default input file directory)

    References
    ----------
    .. [Chandio19] Chandio, B.Q., S. Koudoro, D. Reagan, J. Harezlak,
    E. Garyfallidis, Bundle Analytics: a computational and statistical
    analyses framework for tractometric studies, Proceedings of:
    International Society of Magnetic Resonance in Medicine (ISMRM),
    Montreal, Canada, 2019.

    """

    dt = dict()

    mb = os.listdir(model_bundle_folder)
    mb.sort()
    bd = os.listdir(bundle_folder)
    bd.sort()
    org_bd = os.listdir(orig_bundle_folder)
    org_bd.sort()
    n = len(org_bd)

    for io in range(n):
        mbundles, _ = load_trk(os.path.join(model_bundle_folder, mb[io]))
        bundles, _ = load_trk(os.path.join(bundle_folder, bd[io]))
        orig_bundles, _ = load_trk(os.path.join(orig_bundle_folder,
                                   org_bd[io]))

        mbundle_streamlines = set_number_of_points(mbundles,
                                                   nb_points=no_disks)

        metric = AveragePointwiseEuclideanMetric()
        qb = QuickBundles(threshold=25., metric=metric)
        clusters = qb.cluster(mbundle_streamlines)
        centroids = Streamlines(clusters.centroids)

        print('Number of centroids ', len(centroids.data))
        print('Model bundle ', mb[io])
        print('Number of streamlines in bundle in common space ',
              len(bundles))
        print('Number of streamlines in bundle in original space ',
              len(orig_bundles))

        _, indx = cKDTree(centroids.data, 1,
                          copy_data=True).query(bundles.data, k=1)

        metric_files_names = os.listdir(metric_folder)
        _, affine = load_nifti(os.path.join(metric_folder, "fa.nii.gz"))

        affine_r = np.linalg.inv(affine)
        transformed_orig_bundles = transform_streamlines(orig_bundles,
                                                         affine_r)

        for mn in range(0, len(metric_files_names)):

            ind = np.array(indx)
            fm = metric_files_names[mn][:2]
            bm = mb[io][:-4]
            dt = dict()
            metric_name = os.path.join(metric_folder,
                                       metric_files_names[mn])

            if metric_files_names[mn][2:] == '.nii.gz':
                metric, _ = load_nifti(metric_name)

                dti_measures(transformed_orig_bundles, metric, dt, fm,
                             bm, subject, group, ind, out_dir)

            else:
                fm = metric_files_names[mn][:3]
                metric = load_peaks(metric_name)
                peak_values(bundles, metric, dt, fm, bm, subject, group,
                            ind, out_dir)
Esempio n. 58
0
def bundle_analysis(model_bundle_folder, bundle_folder, orig_bundle_folder,
                    metric_folder, group, subject, no_disks=100,
                    out_dir=''):
    """
    Applies statistical analysis on bundles and saves the results
    in a directory specified by ``out_dir``.

    Parameters
    ----------
    model_bundle_folder : string
        Path to the input model bundle files. This path may contain
        wildcards to process multiple inputs at once.
    bundle_folder : string
        Path to the input bundle files in common space. This path may
        contain wildcards to process multiple inputs at once.
    orig_bundle_folder : string
        Path to the input bundle files in native space. This path may
        contain wildcards to process multiple inputs at once.
    metric_folder : string
        Path to the input dti metric or/and peak files. It will be used as
        metric for statistical analysis of bundles.
    group : string
        what group subject belongs to e.g. control or patient
    subject : string
        subject id e.g. 10001
    no_disks : integer, optional
        Number of disks used for dividing bundle into disks. (Default 100)
    out_dir : string, optional
        Output directory (default input file directory)

    References
    ----------
    .. [Chandio19] Chandio, B.Q., S. Koudoro, D. Reagan, J. Harezlak,
    E. Garyfallidis, Bundle Analytics: a computational and statistical
    analyses framework for tractometric studies, Proceedings of:
    International Society of Magnetic Resonance in Medicine (ISMRM),
    Montreal, Canada, 2019.

    """

    dt = dict()

    mb = os.listdir(model_bundle_folder)
    mb.sort()
    bd = os.listdir(bundle_folder)
    bd.sort()
    org_bd = os.listdir(orig_bundle_folder)
    org_bd.sort()
    n = len(org_bd)

    for io in range(n):
        mbundles = load_tractogram(os.path.join(model_bundle_folder, mb[io]),
                                   'same',
                                   bbox_valid_check=False).streamlines
        bundles = load_tractogram(os.path.join(bundle_folder, bd[io]),
                                  'same',
                                  bbox_valid_check=False).streamlines
        orig_bundles = load_tractogram(os.path.join(orig_bundle_folder,
                                                    org_bd[io]), 'same',
                                       bbox_valid_check=False).streamlines

        mbundle_streamlines = set_number_of_points(mbundles,
                                                   nb_points=no_disks)

        metric = AveragePointwiseEuclideanMetric()
        qb = QuickBundles(threshold=25., metric=metric)
        clusters = qb.cluster(mbundle_streamlines)
        centroids = Streamlines(clusters.centroids)

        print('Number of centroids ', len(centroids.data))
        print('Model bundle ', mb[io])
        print('Number of streamlines in bundle in common space ',
              len(bundles))
        print('Number of streamlines in bundle in original space ',
              len(orig_bundles))

        _, indx = cKDTree(centroids.data, 1,
                          copy_data=True).query(bundles.data, k=1)

        metric_files_names = os.listdir(metric_folder)
        _, affine = load_nifti(os.path.join(metric_folder, "fa.nii.gz"))

        affine_r = np.linalg.inv(affine)
        transformed_orig_bundles = transform_streamlines(orig_bundles,
                                                         affine_r)

        for mn in range(0, len(metric_files_names)):

            ind = np.array(indx)
            fm = metric_files_names[mn][:2]
            bm = mb[io][:-4]
            dt = dict()
            metric_name = os.path.join(metric_folder,
                                       metric_files_names[mn])

            if metric_files_names[mn][2:] == '.nii.gz':
                metric, _ = load_nifti(metric_name)

                dti_measures(transformed_orig_bundles, metric, dt, fm,
                             bm, subject, group, ind, out_dir)

            else:
                fm = metric_files_names[mn][:3]
                metric = load_peaks(metric_name)
                peak_values(bundles, metric, dt, fm, bm, subject, group,
                            ind, out_dir)
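A minimal, hypothetical invocation of ``bundle_analysis`` is sketched below; every folder path, the group label and the subject id are placeholders, not values taken from the original snippet:

# All paths, the group label and the subject id below are placeholder values.
bundle_analysis(model_bundle_folder='model_bundles/',
                bundle_folder='bundles_common_space/',
                orig_bundle_folder='bundles_native_space/',
                metric_folder='dti_metrics/',
                group='control',
                subject='10001',
                no_disks=100,
                out_dir='out/')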
Esempio n. 59
0
def afq_profile(data, bundle, affine=None, n_points=100,
                orient_by=None, weights=None, **weights_kwarg):
    """
    Calculates a summarized profile of data for a bundle or tract
    along its length.

    Follows the approach outlined in [Yeatman2012]_.

    Parameters
    ----------
    data : 3D volume
        The statistic to sample with the streamlines.

    bundle : StreamLines class instance
        The collection of streamlines (possibly already resampled into an array
         for each to have the same length) with which we are resampling. See
         Note below about orienting the streamlines.

    affine: 4-by-4 array, optional.
        A transformation associated with the streamlines in the bundle.
        Default: identity.

    n_points: int, optional
        The number of points to sample along the bundle. Default: 100.

    orient_by: streamline, optional.
        A streamline to use as a standard to orient all of the streamlines in
        the bundle according to.

    weights : 1D array or 2D array or callable (optional)
        Weight each streamline (1D) or each node (2D) when calculating the
        tract-profiles. Must sum to 1 across streamlines (in each node if
        relevant). If callable, this is a function that calculates weights.

    weights_kwarg : key-word arguments
        Additional key-word arguments to pass to the weight-calculating
        function. Only to be used if weights is a callable.

    Returns
    -------
    ndarray : a 1D array with the profile of `data` along the length of
        `bundle`

    Note
    ----
    Before providing a bundle as input to this function, you will need to make
    sure that the streamlines in the bundle are all oriented in the same
    orientation relative to the bundle (use :func:`orient_by_streamline`).

    References
    ----------
    .. [Yeatman2012] Yeatman, Jason D., Robert F. Dougherty,
       Nathaniel J. Myall, Brian A. Wandell, and Heidi M. Feldman. 2012.
       "Tract Profiles of White Matter Properties: Automating Fiber-Tract
       Quantification" PloS One 7 (11): e49790.
    """
    if orient_by is not None:
        bundle = orient_by_streamline(bundle, orient_by, affine=affine)
    if len(bundle) == 0:
        raise ValueError("The bundle contains no streamlines")

    # Resample each streamline to the same number of points:
    fgarray = set_number_of_points(bundle, n_points)

    # Extract the values
    values = np.array(values_from_volume(data, fgarray, affine=affine))

    if weights is None:
        weights = np.ones(values.shape) / values.shape[0]
    elif callable(weights):
        weights = weights(bundle, **weights_kwarg)
    else:
        # We check that weights *always sum to 1 across streamlines*:
        if not np.allclose(np.sum(weights, 0), np.ones(n_points)):
            raise ValueError("The sum of weights across streamlines must ",
                             "be equal to 1")

    return np.sum(weights * values, 0)
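A short usage sketch of ``afq_profile`` follows; it assumes ``fa`` is a 3D scalar volume (e.g. an FA map) and ``bundle`` is a set of consistently oriented streamlines living in the same space as ``fa`` (both names are illustrative, not from the original snippet):

import numpy as np

# 'fa' (3D volume) and 'bundle' (streamlines) are assumed to exist already.
# With weights=None, every streamline contributes equally at each node.
profile = afq_profile(fa, bundle, affine=np.eye(4), n_points=100)
# 'profile' is a 1D array of length n_points sampled along the bundle.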
Esempio n. 60
0
def test_set_number_of_points():
    # Test resampling of only one streamline
    nb_points = 12
    modified_streamline_cython = set_number_of_points(streamline, nb_points)
    modified_streamline_python = set_number_of_points_python(
        streamline, nb_points)
    assert_equal(len(modified_streamline_cython), nb_points)
    # Use a 5-digit precision because the streamline is stored in float32.
    assert_array_almost_equal(modified_streamline_cython,
                              modified_streamline_python, 5)

    modified_streamline_cython = set_number_of_points(streamline_64bit,
                                                      nb_points)
    modified_streamline_python = set_number_of_points_python(
        streamline_64bit, nb_points)
    assert_equal(len(modified_streamline_cython), nb_points)
    assert_array_almost_equal(modified_streamline_cython,
                              modified_streamline_python)

    res = []
    simple_streamline = np.array([[0, 0, 0], [1, 1, 1], [2, 2, 2]], 'f4')
    for nb_points in range(2, 200):
        modified_streamline_cython = set_number_of_points(
            simple_streamline, nb_points)
        res.append(nb_points - len(modified_streamline_cython))
    assert_equal(np.sum(res), 0)

    # Test resampling of multiple streamlines of different nb_points
    nb_points = 12
    modified_streamlines_cython = set_number_of_points(streamlines, nb_points)

    for i, s in enumerate(streamlines):
        modified_streamline_python = set_number_of_points_python(s, nb_points)
        # Use a 5-digit precision because the streamline is stored in float32.
        assert_array_almost_equal(modified_streamlines_cython[i],
                                  modified_streamline_python, 5)

    modified_streamlines_cython = set_number_of_points(streamlines_64bit,
                                                       nb_points)

    for i, s in enumerate(streamlines_64bit):
        modified_streamline_python = set_number_of_points_python(s, nb_points)
        assert_array_almost_equal(modified_streamlines_cython[i],
                                  modified_streamline_python)

    # Test streamlines with mixed dtype
    streamlines_mixed_dtype = [
        streamline,
        streamline.astype(np.float64),
        streamline.astype(np.int32),
        streamline.astype(np.int64)
    ]
    nb_points_mixed_dtype = [
        len(s)
        for s in set_number_of_points(streamlines_mixed_dtype, nb_points)
    ]
    assert_array_equal(nb_points_mixed_dtype,
                       [nb_points] * len(streamlines_mixed_dtype))

    # Test streamlines with different shape
    modified_streamlines_cython = set_number_of_points(
        heterogeneous_streamlines, nb_points)

    for i, s in enumerate(heterogeneous_streamlines):
        modified_streamline_python = set_number_of_points_python(s, nb_points)
        assert_array_almost_equal(modified_streamlines_cython[i],
                                  modified_streamline_python)

    # Test streamline with integer dtype
    modified_streamline = set_number_of_points(streamline.astype(np.int32))
    assert_true(modified_streamline.dtype == np.float32)
    modified_streamline = set_number_of_points(streamline.astype(np.int64))
    assert_true(modified_streamline.dtype == np.float64)

    # Test empty list
    assert_equal(set_number_of_points([]), [])

    # Test streamline having only one point
    assert_raises(ValueError, set_number_of_points, np.array([[1, 2, 3]]))

    # We do not support list of lists, it should be numpy ndarray.
    streamline_unsupported = [[1, 2, 3], [4, 5, 5], [2, 1, 3], [4, 2, 1]]
    assert_raises(AttributeError, set_number_of_points, streamline_unsupported)

    # Test resampling a read-only numpy array (flag WRITABLE=False)
    streamline_readonly = streamline.copy()
    streamline_readonly.setflags(write=False)
    assert_equal(len(set_number_of_points(streamline_readonly, nb_points=42)),
                 42)

    # Test resampling lists of read-only numpy arrays (flag WRITABLE=False)
    streamlines_readonly = []
    for s in streamlines:
        streamlines_readonly.append(s.copy())
        streamlines_readonly[-1].setflags(write=False)

    assert_equal(len(set_number_of_points(streamlines_readonly, nb_points=42)),
                 len(streamlines_readonly))

    streamlines_readonly = []
    for s in streamlines_64bit:
        streamlines_readonly.append(s.copy())
        streamlines_readonly[-1].setflags(write=False)

    assert_equal(len(set_number_of_points(streamlines_readonly, nb_points=42)),
                 len(streamlines_readonly))

    # Test if nb_points is less than 2
    assert_raises(ValueError,
                  set_number_of_points, [np.ones(
                      (10, 3)), np.ones((10, 3))],
                  nb_points=1)