Example #1
    def _register_neighb_to_model(self, model_bundle, neighb_streamlines,
                                  metric=None, x0=None, bounds=None,
                                  select_model=400, select_target=600,
                                  method='L-BFGS-B',
                                  nb_pts=20, num_threads=None):

        if self.verbose:
            print('# Local SLR of neighb_streamlines to model')
            t = time()

        if metric is None or metric == 'symmetric':
            metric = BundleMinDistanceMetric(num_threads=num_threads)
        if metric == 'asymmetric':
            metric = BundleMinDistanceAsymmetricMetric()
        if metric == 'diagonal':
            metric = BundleSumDistanceMatrixMetric()

        if x0 is None:
            x0 = 'similarity'

        if bounds is None:
            bounds = [(-30, 30), (-30, 30), (-30, 30),
                      (-45, 45), (-45, 45), (-45, 45), (0.8, 1.2)]

        # TODO this can be sped up by using the centroids directly
        static = select_random_set_of_streamlines(model_bundle,
                                                  select_model, rng=self.rng)
        moving = select_random_set_of_streamlines(neighb_streamlines,
                                                  select_target, rng=self.rng)

        static = set_number_of_points(static, nb_pts)
        moving = set_number_of_points(moving, nb_pts)

        slr = StreamlineLinearRegistration(metric=metric, x0=x0,
                                           bounds=bounds,
                                           method=method)
        slm = slr.optimize(static, moving)

        transf_streamlines = neighb_streamlines.copy()
        transf_streamlines._data = apply_affine(
            slm.matrix, transf_streamlines._data)

        transf_matrix = slm.matrix
        slr_bmd = slm.fopt
        slr_iterations = slm.iterations

        if self.verbose:
            print(' Square-root of BMD is %.3f' % (np.sqrt(slr_bmd),))
            if slr_iterations is not None:
                print(' Number of iterations %d' % (slr_iterations,))
            print(' Matrix size {}'.format(slm.matrix.shape))
            original = np.get_printoptions()
            np.set_printoptions(3, suppress=True)
            print(transf_matrix)
            print(slm.xopt)
            np.set_printoptions(**original)

            print(' Duration %0.3f sec. \n' % (time() - t,))

        return transf_streamlines, slr_bmd
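The method above applies the SLR affine by writing into the private ``_data`` attribute of the streamline ArraySequence. A minimal alternative sketch, assuming only dipy's public ``transform_streamlines`` helper, gives the same point-wise transform without touching private attributes (``neighb_streamlines`` and ``slm`` refer to the objects from the method above):

from dipy.tracking.streamline import transform_streamlines

# hedged sketch, not part of the class above: apply the affine found by the
# local SLR through the public helper instead of mutating ``_data``
transf_streamlines = transform_streamlines(neighb_streamlines, slm.matrix)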
Example #2
def test_rigid_real_bundles():

    bundle_initial = fornix_streamlines()[:20]
    bundle, shift = center_streamlines(bundle_initial)

    mat = compose_matrix44([0, 0, 20, 45., 0, 0])

    bundle2 = transform_streamlines(bundle, mat)

    bundle_sum_distance = BundleSumDistanceMatrixMetric()
    srr = StreamlineLinearRegistration(bundle_sum_distance,
                                       x0=np.zeros(6),
                                       method='Powell')
    new_bundle2 = srr.optimize(bundle, bundle2).transform(bundle2)

    evaluate_convergence(bundle, new_bundle2)

    bundle_min_distance = BundleMinDistanceMatrixMetric()
    srr = StreamlineLinearRegistration(bundle_min_distance,
                                       x0=np.zeros(6),
                                       method='Powell')
    new_bundle2 = srr.optimize(bundle, bundle2).transform(bundle2)

    evaluate_convergence(bundle, new_bundle2)

    assert_raises(ValueError, StreamlineLinearRegistration, method='Whatever')
Example #3
def runslr(fixed, moving, npts=20):
    fixed_subsamp = set_number_of_points(fixed, npts)
    moving_subsamp = set_number_of_points(moving, npts)

    srr = StreamlineLinearRegistration()
    srm = srr.optimize(static=fixed_subsamp, moving=moving_subsamp)
    aligned = srm.transform(moving)
    return aligned
Example #4
def runslr(fixed, moving, npts=20, bounds=None, verbose=False):
    fixed_subsamp = set_number_of_points(fixed, npts)
    moving_subsamp = set_number_of_points(moving, npts)

    srr = StreamlineLinearRegistration(bounds=bounds, verbose=verbose)
    #print(srr.verbose)
    srm = srr.optimize(static=fixed_subsamp, moving=moving_subsamp)
    aligned = srm.transform(moving)
    return aligned
Example #5
def test_cascade_of_optimizations_and_threading():

    cingulum_bundles = two_cingulum_bundles()

    cb1 = cingulum_bundles[0]
    cb1 = set_number_of_points(cb1, 20)

    test_x0 = np.array([10, 4, 3, 0, 20, 10, 1.5, 1.5, 1.5, 0., 0.2, 0])

    cb2 = transform_streamlines(cingulum_bundles[0], compose_matrix44(test_x0))
    cb2 = set_number_of_points(cb2, 20)

    print('first rigid')
    slr = StreamlineLinearRegistration(x0=6, num_threads=1)
    slm = slr.optimize(cb1, cb2)

    print('then similarity')
    slr2 = StreamlineLinearRegistration(x0=7, num_threads=2)
    slm2 = slr2.optimize(cb1, cb2, slm.matrix)

    print('then affine')
    slr3 = StreamlineLinearRegistration(x0=12,
                                        options={'maxiter': 50},
                                        num_threads=None)
    slm3 = slr3.optimize(cb1, cb2, slm2.matrix)

    assert_(slm2.fopt < slm.fopt)
    assert_(slm3.fopt < slm2.fopt)
Example #6
def make_kesh_template(bundle_list,
                       keystone_boi,
                       qb_thresh=5.,
                       Nsubsamp=20,
                       clsz_thresh=5,
                       keystone2MNI_xfm=None,
                       verbose=False):
    '''
    bundle_list: list of independent bundles (lists) not assumed to be in the same space
    keystone_boi: bundle (list) of streamlines that will be the anchor bundle all
                  others are registered to for the template
    qb_thresh: threshold for QuickBundles (determines how finely each bundle is clustered)
    Nsubsamp: subsampling for QuickBundles and SLR
    clsz_thresh: how many streamlines a cluster must have to be included in the template*
    keystone2MNI_xfm: streamline linear registration between the whole-brain keystone and MNI**
    verbose: set this to True to print info about each bundle as it runs


    *qb_thresh and clsz_thresh are related. If you have a fine parcellation
    (low qb_thresh) then clsz_thresh should be quite low, since clusters
    will be small.

    **Provide this if (and only if) you want the result to be in MNI space;
    otherwise it will be in keystone space.
    '''

    kesh_template_sls = []
    rejected_sls = []
    boi_sls_subsamp = set_number_of_points(keystone_boi, Nsubsamp)
    for i, sls in enumerate(bundle_list):
        print(len(bundle_list) - i)
        sls_subsamp = set_number_of_points(sls, Nsubsamp)
        qb = QuickBundles(threshold=qb_thresh)
        clusters = qb.cluster(sls)
        cluster_sizes = [len(cl) for cl in clusters]
        # enforce that clusters smaller than a threshold are not in template
        centroids = clusters.centroids
        slr = StreamlineLinearRegistration()
        srm = slr.optimize(static=boi_sls_subsamp, moving=sls_subsamp)
        xfmd_centroids = srm.transform(centroids)
        # NOTE: we actually want to upsample the centroids so the template has
        # better properties... what's the most efficient way to do that?
        # collect kept/rejected centroids separately instead of popping while
        # iterating, so the index j stays aligned with cluster_sizes
        kept_centroids = []
        for j, centroid in enumerate(xfmd_centroids):
            if cluster_sizes[j] < clsz_thresh:
                rejected_sls.append(centroid)
            else:
                kept_centroids.append(centroid)
        kesh_template_sls += kept_centroids
        if verbose:
            print('Bundle %i' % i)
            print('N centroids: %i' % len(centroids))
            print('kept %i rejected %i total %i' %
                  (len(kesh_template_sls), len(rejected_sls), len(clusters)))
    if keystone2MNI_xfm:
        print('MNI YAY!')
    return kesh_template_sls, rejected_sls
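A minimal usage sketch for ``make_kesh_template``, using the two cingulum bundles shipped with ``dipy.data`` as stand-ins for a real bundle list; only the call signature is illustrated, and the function's own module is assumed to provide its QuickBundles and SLR imports:

from dipy.data import two_cingulum_bundles

# toy inputs: two bundles from one subject, the first doubling as the keystone
bundles = two_cingulum_bundles()
template_sls, rejected_sls = make_kesh_template([bundles[0], bundles[1]],
                                                keystone_boi=bundles[0],
                                                qb_thresh=5.,
                                                Nsubsamp=20,
                                                clsz_thresh=5,
                                                verbose=True)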
Example #7
def test_evolution_of_previous_iterations():

    static = fornix_streamlines()[:20]
    moving = fornix_streamlines()[:20]

    moving = [m + np.array([10., 0., 0.]) for m in moving]

    slr = StreamlineLinearRegistration(evolution=True)

    slm = slr.optimize(static, moving)

    assert_equal(len(slm.matrix_history), slm.iterations)
Example #8
def tractograms_slr(moving_tractogram, static_tractogram):

    subjID = ntpath.basename(static_tractogram)[0:6]
    exID = ntpath.basename(moving_tractogram)[0:6]

    aff_dir = '/N/dc2/projects/lifebid/giulia/data/HCP3-IU-Giulia/derivatives/slr_transformations'
    affine_path = '%s/affine_m%s_s%s.npy' % (aff_dir, exID, subjID)
    affine_fname = './affine_m%s_s%s.npy' % (exID, subjID)

    if isfile(affine_path):
        print("Affine already computed. Retrieving past results.")
        copyfile(affine_path, affine_fname)

    else:
        print("Loading tractograms...")
        moving_tractogram = nib.streamlines.load(moving_tractogram)
        moving_tractogram = moving_tractogram.streamlines
        static_tractogram = nib.streamlines.load(static_tractogram)
        static_tractogram = static_tractogram.streamlines

        print("Set parameters as in Garyfallidis et al. 2015.")
        threshold_length = 40.0  # 50mm / 1.25
        qb_threshold = 16.0  # 20mm / 1.25
        nb_res_points = 20

        print("Performing QuickBundles of static tractogram and resampling...")
        st = np.array(
            [s for s in static_tractogram if len(s) > threshold_length],
            dtype=object)
        qb = QuickBundles(threshold=qb_threshold)
        st_clusters = [cluster.centroid for cluster in qb.cluster(st)]
        st_clusters = set_number_of_points(st_clusters, nb_res_points)

        print("Performing QuickBundles of moving tractogram and resampling...")
        mt = np.array(
            [s for s in moving_tractogram if len(s) > threshold_length],
            dtype=object)
        qb = QuickBundles(threshold=qb_threshold)
        mt_clusters = [cluster.centroid for cluster in qb.cluster(mt)]
        mt_clusters = set_number_of_points(mt_clusters, nb_res_points)

        print("Performing Linear Registration...")
        srr = StreamlineLinearRegistration()
        srm = srr.optimize(static=st_clusters, moving=mt_clusters)

        print(
            "Affine transformation matrix with Streamline Linear Registration:"
        )
        affine = srm.matrix
        print('%s' % affine)

        np.save('affine_m%s_s%s.npy' % (exID, subjID), affine)
        print("Affine for example %s and target %s saved." % (exID, subjID))
Example #9
def _affine_slr(sft_bundle, sft_centroid):
    x0 = np.zeros((7, ))
    bounds_dof = [(-10, 10), (-10, 10), (-10, 10), (-5, 5), (-5, 5), (-5, 5),
                  (0.95, 1.05)]
    metric = BundleMinDistanceMetric(num_threads=1)
    slr = StreamlineLinearRegistration(metric=metric,
                                       method="L-BFGS-B",
                                       bounds=bounds_dof,
                                       x0=x0,
                                       num_threads=1)
    tmp_bundle = set_number_of_points(sft_bundle.streamlines.copy(), 20)
    tmp_centroid = set_number_of_points(sft_centroid.streamlines.copy(), 20)
    slm = slr.optimize(tmp_bundle, tmp_centroid)
    sft_centroid.streamlines = transform_streamlines(sft_centroid.streamlines,
                                                     slm.matrix)
    return sft_centroid
Example #10
def test_evolution_of_previous_iterations():

    static = fornix_streamlines()[:20]
    moving = fornix_streamlines()[:20]

    moving = [m + np.array([10., 0., 0.]) for m in moving]

    slr = StreamlineLinearRegistration(evolution=True)

    from dipy.core.optimize import SCIPY_LESS_0_12

    if not SCIPY_LESS_0_12:

        slm = slr.optimize(static, moving)

        assert_equal(len(slm.matrix_history), slm.iterations)
Example #11
def streamline_registration(moving,
                            static,
                            n_points=100,
                            native_resampled=False):
    """
    Register two collections of streamlines ('bundles') to each other

    Parameters
    ----------
    moving, static : lists of n by 3 arrays, or str
        The two bundles to be registered. Given either as lists of arrays with
        3D coordinates, or strings containing full paths to these files.

    n_points : int, optional
        How many points to resample to. Default: 100.

    native_resampled : bool, optional
        Whether to return the moving bundle in the original space, but
        resampled in the static space to n_points.

    Returns
    -------
    aligned : list
        Streamlines from the moving group, moved to be closely matched to
        the static group.

    matrix : array (4, 4)
        The affine transformation that takes us from 'moving' to 'static'
    """
    # Load the streamlines, if you were given a file-name
    if isinstance(moving, str):
        moving = load_trk(moving, 'same', bbox_valid_check=False).streamlines
    if isinstance(static, str):
        static = load_trk(static, 'same', bbox_valid_check=False).streamlines

    srr = StreamlineLinearRegistration()
    srm = srr.optimize(static=set_number_of_points(static, n_points),
                       moving=set_number_of_points(moving, n_points))

    aligned = srm.transform(moving)
    if native_resampled:
        aligned = set_number_of_points(aligned, n_points)
        aligned = transform_tracking_output(aligned, np.linalg.inv(srm.matrix))

    return aligned, srm.matrix
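A brief usage sketch for ``streamline_registration``; the ``.trk`` file names are placeholders, and strings are resolved through ``load_trk`` exactly as in the function body above:

# placeholder paths: any two bundles saved as .trk in the same space will do
aligned, mat = streamline_registration('moving_bundle.trk',
                                       'static_bundle.trk',
                                       n_points=100)
print(mat.shape)  # (4, 4) affine taking the moving bundle into static space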
Example #12
def test_rigid_parallel_lines():

    bundle_initial = simulated_bundle()
    bundle, shift = center_streamlines(bundle_initial)
    mat = compose_matrix44([20, 0, 10, 0, 40, 0])

    bundle2 = transform_streamlines(bundle, mat)

    bundle_sum_distance = BundleSumDistanceMatrixMetric()
    options = {'maxcor': 100, 'ftol': 1e-9, 'gtol': 1e-16, 'eps': 1e-3}
    srr = StreamlineLinearRegistration(metric=bundle_sum_distance,
                                       x0=np.zeros(6),
                                       method='L-BFGS-B',
                                       bounds=None,
                                       options=options)

    new_bundle2 = srr.optimize(bundle, bundle2).transform(bundle2)
    evaluate_convergence(bundle, new_bundle2)
Example #13
def test_rigid_parallel_lines():

    bundle_initial = simulated_bundle()
    bundle, shift = center_streamlines(bundle_initial)
    mat = compose_matrix44([20, 0, 10, 0, 40, 0])

    bundle2 = transform_streamlines(bundle, mat)

    bundle_sum_distance = BundleSumDistanceMatrixMetric()
    options = {'maxcor': 100, 'ftol': 1e-9, 'gtol': 1e-16, 'eps': 1e-3}
    srr = StreamlineLinearRegistration(metric=bundle_sum_distance,
                                       x0=np.zeros(6),
                                       method='L-BFGS-B',
                                       bounds=None,
                                       options=options)

    new_bundle2 = srr.optimize(bundle, bundle2).transform(bundle2)
    evaluate_convergence(bundle, new_bundle2)
Example #14
def test_rigid_partial_real_bundles():

    static = fornix_streamlines()[:20]
    moving = fornix_streamlines()[20:40]
    static_center, shift = center_streamlines(static)
    moving_center, shift2 = center_streamlines(moving)

    print(shift2)
    mat = compose_matrix(translate=np.array([0, 0, 0.]),
                         angles=np.deg2rad([40, 0, 0.]))
    moved = transform_streamlines(moving_center, mat)

    srr = StreamlineLinearRegistration()

    srm = srr.optimize(static_center, moved)
    print(srm.fopt)
    print(srm.iterations)
    print(srm.funcs)

    moving_back = srm.transform(moved)
    print(srm.matrix)

    static_center = set_number_of_points(static_center, 100)
    moving_center = set_number_of_points(moving_back, 100)

    vol = np.zeros((100, 100, 100))
    spts = np.concatenate(static_center, axis=0)
    spts = np.round(spts).astype(int) + np.array([50, 50, 50])

    mpts = np.concatenate(moving_center, axis=0)
    mpts = np.round(mpts).astype(int) + np.array([50, 50, 50])

    for index in spts:
        i, j, k = index
        vol[i, j, k] = 1

    vol2 = np.zeros((100, 100, 100))
    for index in mpts:
        i, j, k = index
        vol2[i, j, k] = 1

    overlap = np.sum(np.logical_and(vol, vol2)) / float(np.sum(vol2))

    assert_equal(overlap * 100 > 40, True)
Example #15
def test_rigid_partial_real_bundles():

    static = fornix_streamlines()[:20]
    moving = fornix_streamlines()[20:40]
    static_center, shift = center_streamlines(static)
    moving_center, shift2 = center_streamlines(moving)

    print(shift2)
    mat = compose_matrix(translate=np.array([0, 0, 0.]),
                         angles=np.deg2rad([40, 0, 0.]))
    moved = transform_streamlines(moving_center, mat)

    srr = StreamlineLinearRegistration()

    srm = srr.optimize(static_center, moved)
    print(srm.fopt)
    print(srm.iterations)
    print(srm.funcs)

    moving_back = srm.transform(moved)
    print(srm.matrix)

    static_center = set_number_of_points(static_center, 100)
    moving_center = set_number_of_points(moving_back, 100)

    vol = np.zeros((100, 100, 100))
    spts = np.concatenate(static_center, axis=0)
    spts = np.round(spts).astype(int) + np.array([50, 50, 50])

    mpts = np.concatenate(moving_center, axis=0)
    mpts = np.round(mpts).astype(int) + np.array([50, 50, 50])

    for index in spts:
        i, j, k = index
        vol[i, j, k] = 1

    vol2 = np.zeros((100, 100, 100))
    for index in mpts:
        i, j, k = index
        vol2[i, j, k] = 1

    overlap = np.sum(np.logical_and(vol, vol2)) / float(np.sum(vol2))

    assert_equal(overlap * 100 > 40, True)
Example #16
def streamline_registration(moving, static, n_points=100,
                            native_resampled=False):
    """
    Register two collections of streamlines ('bundles') to each other

    Parameters
    ----------
    moving, static : lists of n by 3 arrays, or str
        The two bundles to be registered. Given either as lists of arrays with
        3D coordinates, or strings containing full paths to these files.

    n_points : int, optional
        How many points to resample to. Default: 100.

    native_resampled : bool, optional
        Whether to return the moving bundle in the original space, but
        resampled in the static space to n_points.

    Returns
    -------
    aligned : list
        Streamlines from the moving group, moved to be closely matched to
        the static group.

    matrix : array (4, 4)
        The affine transformation that takes us from 'moving' to 'static'
    """
    # Load the streamlines, if you were given a file-name
    if isinstance(moving, str):
        moving = sut.read_trk(moving)
    if isinstance(static, str):
        static = sut.read_trk(static)

    srr = StreamlineLinearRegistration()
    srm = srr.optimize(static=set_number_of_points(static, n_points),
                       moving=set_number_of_points(moving, n_points))

    aligned = srm.transform(moving)
    if native_resampled:
        aligned = set_number_of_points(aligned, n_points)
        aligned = move_streamlines(aligned, np.linalg.inv(srm.matrix))

    return aligned, srm.matrix
Example #17
def streamline_based_registration(source_tractography_streamlines,
                                  target_tractography_streamlines,
                                  subject_pair):

    intersection_voxel_list = []
    target_voxel_list = []
    n_points = 20
    srr = StreamlineLinearRegistration()
    SAr = [downsample(i, n_points) for i in source_tractography]
    SBr = [downsample(i, n_points) for i in target_tractography]
    srm = srr.optimize(static=SBr, moving=SAr)
    transformed_tractography = srm.transform(source_tractography)

    print(len(transformed_tractography))
    temp_index = 0
    for i in range(len(source_tractography_streamlines)):

        voxel_and, voxel_target = voxel_measure(
            transformed_tractography[
                temp_index:temp_index + source_tractography_streamlines[
                    source + '_' + subject_tracts[str(subject_pair)][i]][1]],
            target_tractography_streamlines[
                target + '_' + subject_tracts[str(subject_pair)][i]][0])

        temp_index = temp_index + source_tractography_streamlines[
            source + '_' + subject_tracts[str(subject_pair)][i]][1]

        intersection_voxel_list.append(voxel_and)
        target_voxel_list.append(voxel_target)

    total_intersection_voxel_list = np.sum(np.array(intersection_voxel_list))
    total_target_voxel_list = np.sum(np.array(target_voxel_list))

    print "Number of voxel per tract"
    print intersection_voxel_list, target_voxel_list

    print "Number of voxel"
    print total_intersection_voxel_list, total_target_voxel_list
    TDA_all_voxel_registration = np.divide(total_intersection_voxel_list,
                                           total_target_voxel_list)

    print "Modified-TDR-for-all"
    print TDA_all_voxel_registration
Example #18
def test_stream_rigid():

    static = fornix_streamlines()[:20]
    moving = fornix_streamlines()[20:40]
    static_center, shift = center_streamlines(static)

    mat = compose_matrix44([0, 0, 0, 0, 40, 0])
    moving = transform_streamlines(moving, mat)

    srr = StreamlineLinearRegistration()
    sr_params = srr.optimize(static, moving)
    moved = transform_streamlines(moving, sr_params.matrix)

    srr = StreamlineLinearRegistration(verbose=True)
    srm = srr.optimize(static, moving)
    moved2 = transform_streamlines(moving, srm.matrix)
    moved3 = srm.transform(moving)

    assert_array_almost_equal(moved[0], moved2[0], decimal=3)
    assert_array_almost_equal(moved2[0], moved3[0], decimal=3)
Example #19
def test_same_number_of_points():

    A = [np.random.rand(10, 3), np.random.rand(20, 3)]
    B = [np.random.rand(21, 3), np.random.rand(30, 3)]
    C = [np.random.rand(10, 3), np.random.rand(10, 3)]
    D = [np.random.rand(20, 3), np.random.rand(20, 3)]

    slr = StreamlineLinearRegistration()
    assert_raises(ValueError, slr.optimize, A, B)
    assert_raises(ValueError, slr.optimize, C, D)
    assert_raises(ValueError, slr.optimize, C, B)
Example #20
def test_similarity_real_bundles():

    bundle_initial = fornix_streamlines()
    bundle_initial, shift = center_streamlines(bundle_initial)
    bundle = bundle_initial[:20]
    xgold = [0, 0, 10, 0, 0, 0, 1.5]
    mat = compose_matrix44(xgold)
    bundle2 = transform_streamlines(bundle_initial[:20], mat)

    metric = BundleMinDistanceMatrixMetric()
    x0 = np.array([0, 0, 0, 0, 0, 0, 1], 'f8')

    slr = StreamlineLinearRegistration(metric=metric,
                                       x0=x0,
                                       method='Powell',
                                       bounds=None,
                                       verbose=False)

    slm = slr.optimize(bundle, bundle2)
    new_bundle2 = slm.transform(bundle2)
    evaluate_convergence(bundle, new_bundle2)
Example #21
def test_similarity_real_bundles():

    bundle_initial = fornix_streamlines()
    bundle_initial, shift = center_streamlines(bundle_initial)
    bundle = bundle_initial[:20]
    xgold = [0, 0, 10, 0, 0, 0, 1.5]
    mat = compose_matrix44(xgold)
    bundle2 = transform_streamlines(bundle_initial[:20], mat)

    metric = BundleMinDistanceMatrixMetric()
    x0 = np.array([0, 0, 0, 0, 0, 0, 1], 'f8')

    slr = StreamlineLinearRegistration(metric=metric,
                                       x0=x0,
                                       method='Powell',
                                       bounds=None,
                                       verbose=False)

    slm = slr.optimize(bundle, bundle2)
    new_bundle2 = slm.transform(bundle2)
    evaluate_convergence(bundle, new_bundle2)
Example #22
def test_rigid_real_bundles():

    bundle_initial = fornix_streamlines()[:20]
    bundle, shift = center_streamlines(bundle_initial)

    mat = compose_matrix44([0, 0, 20, 45., 0, 0])

    bundle2 = transform_streamlines(bundle, mat)

    bundle_sum_distance = BundleSumDistanceMatrixMetric()
    srr = StreamlineLinearRegistration(bundle_sum_distance,
                                       x0=np.zeros(6),
                                       method='Powell')
    new_bundle2 = srr.optimize(bundle, bundle2).transform(bundle2)

    evaluate_convergence(bundle, new_bundle2)

    bundle_min_distance = BundleMinDistanceMatrixMetric()
    srr = StreamlineLinearRegistration(bundle_min_distance,
                                       x0=np.zeros(6),
                                       method='Powell')
    new_bundle2 = srr.optimize(bundle, bundle2).transform(bundle2)

    evaluate_convergence(bundle, new_bundle2)

    assert_raises(ValueError, StreamlineLinearRegistration, method='Whatever')
Example #23
def test_cascade_of_optimizations():

    cingulum_bundles = two_cingulum_bundles()

    cb1 = cingulum_bundles[0]
    cb1 = set_number_of_points(cb1, 20)

    test_x0 = np.array([10, 4, 3, 0, 20, 10, 1.5, 1.5, 1.5, 0., 0.2, 0])

    cb2 = transform_streamlines(cingulum_bundles[0],
                                compose_matrix44(test_x0))
    cb2 = set_number_of_points(cb2, 20)

    print('first rigid')
    slr = StreamlineLinearRegistration(x0=6)
    slm = slr.optimize(cb1, cb2)

    print('then similarity')
    slr2 = StreamlineLinearRegistration(x0=7)
    slm2 = slr2.optimize(cb1, cb2, slm.matrix)

    print('then affine')
    slr3 = StreamlineLinearRegistration(x0=12, options={'maxiter': 50})
    slm3 = slr3.optimize(cb1, cb2, slm2.matrix)

    assert_(slm2.fopt < slm.fopt)
    assert_(slm3.fopt < slm2.fopt)
Example #24
def tractograms_slr(moving_tractogram, static_tractogram):

    subjID = ntpath.basename(static_tractogram)[0:6]
    exID = ntpath.basename(moving_tractogram)[0:6]

    print("Loading tractograms...")
    moving_tractogram = nib.streamlines.load(moving_tractogram)
    moving_tractogram = moving_tractogram.streamlines
    static_tractogram = nib.streamlines.load(static_tractogram)
    static_tractogram = static_tractogram.streamlines

    print("Set parameters as in Garyfallidis et al. 2015.")
    threshold_length = 40.0  # 50mm / 1.25
    qb_threshold = 16.0  # 20mm / 1.25
    nb_res_points = 20

    print("Performing QuickBundles of static tractogram and resampling...")
    st = np.array([s for s in static_tractogram if len(s) > threshold_length],
                  dtype=object)
    qb = QuickBundles(threshold=qb_threshold)
    st_clusters = [cluster.centroid for cluster in qb.cluster(st)]
    st_clusters = set_number_of_points(st_clusters, nb_res_points)

    print("Performing QuickBundles of moving tractogram and resampling...")
    mt = np.array([s for s in moving_tractogram if len(s) > threshold_length],
                  dtype=object)
    qb = QuickBundles(threshold=qb_threshold)
    mt_clusters = [cluster.centroid for cluster in qb.cluster(mt)]
    mt_clusters = set_number_of_points(mt_clusters, nb_res_points)

    print("Performing Linear Registration...")
    srr = StreamlineLinearRegistration()
    srm = srr.optimize(static=st_clusters, moving=mt_clusters)

    print("Affine transformation matrix with Streamline Linear Registration:")
    affine = srm.matrix
    print('%s' % affine)

    np.save('affine_m%s_s%s.npy' % (exID, subjID), affine)
    print("Affine for example %s and target %s saved." % (exID, subjID))
Example #25
def bundle_registration(cb_subj1, cb_subj2, pts=12):
    """
    Register two bundle from two subjects
    directly in the space of streamlines
    Parameters
    ----------
    cb_subj1: first subject's bundle
    cb_subj2: second subject's bundle
    pts: each streamline is divided into sections

    Return
    ------
    registration bundle
    """
    cb_subj1 = set_number_of_points(cb_subj1, pts)
    cb_subj2 = set_number_of_points(cb_subj2, pts)

    srr = StreamlineLinearRegistration()
    srm = srr.optimize(static=cb_subj1, moving=cb_subj2)
    cb_subj2_aligned = srm.transform(cb_subj2)

    return cb_subj2_aligned
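A quick illustration of the call pattern for ``bundle_registration``; the random arrays below are placeholders for two subjects' bundles and carry no anatomical meaning:

import numpy as np

# placeholder bundles: ten streamlines of fifty points each per subject
cb_subj1 = [np.random.rand(50, 3) for _ in range(10)]
cb_subj2 = [np.random.rand(50, 3) for _ in range(10)]
cb_subj2_aligned = bundle_registration(cb_subj1, cb_subj2, pts=12)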
Example #26
def test_x0_input():

    for x0 in [6, 7, 12, "Rigid", 'rigid', "similarity", "Affine"]:
        StreamlineLinearRegistration(x0=x0)

    for x0 in [np.random.rand(6), np.random.rand(7), np.random.rand(12)]:
        StreamlineLinearRegistration(x0=x0)

    for x0 in [8, 20, "Whatever", np.random.rand(20), np.random.rand(20, 3)]:
        assert_raises(ValueError, StreamlineLinearRegistration, x0=x0)

    x0 = np.random.rand(4, 3)
    assert_raises(ValueError, StreamlineLinearRegistration, x0=x0)

    x0_6 = np.zeros(6)
    x0_7 = np.array([0, 0, 0, 0, 0, 0, 1.])
    x0_12 = np.array([0, 0, 0, 0, 0, 0, 1., 1., 1., 0, 0, 0])

    x0_s = [x0_6, x0_7, x0_12, x0_6, x0_7, x0_12]

    for i, x0 in enumerate([6, 7, 12, "Rigid", "similarity", "Affine"]):
        slr = StreamlineLinearRegistration(x0=x0)
        assert_equal(slr.x0, x0_s[i])
Example #27
def register(static, moving, points=20):
    r""" Make StreamlineLinearRegistration simpler to use

    Parameters
    ----------
    :param static: List of numpy.ndarray,
        the target bundle, which stays static during registration
    :param moving: List of numpy.ndarray,
        the bundle that is moved during registration
    :param points: int,
        each streamline is resampled to this number of points
    :return: List of numpy.ndarray
        the aligned moving bundle (returning ``srm.matrix`` as well is left
        commented out below)
    """

    cb_subj1 = set_number_of_points(static, points)
    cb_subj2 = set_number_of_points(moving, points)

    srr = StreamlineLinearRegistration()
    srm = srr.optimize(static=cb_subj1, moving=cb_subj2)
    del cb_subj1
    del cb_subj2
    del static
    return srm.transform(moving)  # , srm.matrix
Example #28
def test_affine_real_bundles():

    bundle_initial = fornix_streamlines()
    bundle_initial, shift = center_streamlines(bundle_initial)
    bundle = bundle_initial[:20]
    xgold = [0, 4, 2, 0, 10, 10, 1.2, 1.1, 1., 0., 0.2, 0.]
    mat = compose_matrix44(xgold)
    bundle2 = transform_streamlines(bundle_initial[:20], mat)

    x0 = np.array([0, 0, 0, 0, 0, 0, 1., 1., 1., 0, 0, 0])

    x = 25

    bounds = [(-x, x), (-x, x), (-x, x),
              (-x, x), (-x, x), (-x, x),
              (0.1, 1.5), (0.1, 1.5), (0.1, 1.5),
              (-1, 1), (-1, 1), (-1, 1)]

    options = {'maxcor': 10, 'ftol': 1e-7, 'gtol': 1e-5, 'eps': 1e-8}

    metric = BundleMinDistanceMatrixMetric()

    slr = StreamlineLinearRegistration(metric=metric,
                                       x0=x0,
                                       method='L-BFGS-B',
                                       bounds=bounds,
                                       verbose=True,
                                       options=options)
    slm = slr.optimize(bundle, bundle2)

    new_bundle2 = slm.transform(bundle2)

    slr2 = StreamlineLinearRegistration(metric=metric,
                                        x0=x0,
                                        method='Powell',
                                        bounds=None,
                                        verbose=True,
                                        options=None)

    slm2 = slr2.optimize(bundle, new_bundle2)

    new_bundle2 = slm2.transform(new_bundle2)

    evaluate_convergence(bundle, new_bundle2)
Example #29
def test_stream_rigid():

    static = fornix_streamlines()[:20]
    moving = fornix_streamlines()[20:40]
    center_streamlines(static)

    mat = compose_matrix44([0, 0, 0, 0, 40, 0])
    moving = transform_streamlines(moving, mat)

    srr = StreamlineLinearRegistration()
    sr_params = srr.optimize(static, moving)
    moved = transform_streamlines(moving, sr_params.matrix)

    srr = StreamlineLinearRegistration(verbose=True)
    srm = srr.optimize(static, moving)
    moved2 = transform_streamlines(moving, srm.matrix)
    moved3 = srm.transform(moving)

    assert_array_almost_equal(moved[0], moved2[0], decimal=3)
    assert_array_almost_equal(moved2[0], moved3[0], decimal=3)
Example #30
def test_wrong_num_threads():
    A = [np.random.rand(10, 3), np.random.rand(10, 3)]
    B = [np.random.rand(10, 3), np.random.rand(10, 3)]

    slr = StreamlineLinearRegistration(num_threads=0)
    assert_raises(ValueError, slr.optimize, A, B)
Example #31
#X_grid_size=header1['dimensions'][0]
#Y_grid_size=header1['dimensions'][1]
#Z_grid_size=header1['dimensions'][2]

#%% Resample Streamlines
print("setting the same number of points for both the tracts...")
track_moving = set_number_of_points(track_moving, N_points)
track_fixed = set_number_of_points(track_fixed, N_points)
#%% SLR

if SLR_flag:
    print("Linear Registration of the tractcs using SLR...")
    bounds = [(-np.inf, np.inf), (-np.inf, np.inf), (-np.inf, np.inf), (0, 0),
              (0, 0), (0, 0)]
    srr = StreamlineLinearRegistration(bounds=bounds)
    srm = srr.optimize(static=track_fixed, moving=track_moving)
    track_moving = srm.transform(track_moving)
    if show_b is True:
        show_both_bundles([track_moving, track_fixed],
                          colors=[window.colors.orange, window.colors.red],
                          show=True,
                          fname=save_dir + 'after_registration.png')

track_moving = np.array(track_moving)
track_fixed = np.array(track_fixed)

#%% Sparse
suffix_list = []
col_ind_list = []
if sparse_flag:
Example #32
    def _register_model_to_neighb(self,
                                  slr_num_thread=1,
                                  select_model=1000,
                                  select_target=1000,
                                  slr_transform_type='scaling'):
        """
        Parameters
        ----------
        slr_num_thread : int
            Number of threads for SLR.
            Should remain 1 for nearly all use-case.
        select_model : int
            Maximum number of clusters to select from the model.
        select_target : int
            Maximum number of clusters to select from the neighborhood.
        slr_transform_type : str
            Define the transformation for the local SLR.
            [translation, rigid, similarity, scaling].

        Returns
        -------
        transf_neighbor : list
            The neighborhood clusters transformed into model space.
        """
        possible_slr_transform_type = {
            'translation': 0,
            'rigid': 1,
            'similarity': 2,
            'scaling': 3
        }
        static = select_random_set_of_streamlines(self.model_centroids,
                                                  select_model, self.rng)
        moving = select_random_set_of_streamlines(self.neighb_centroids,
                                                  select_target, self.rng)

        # Tuple 0,1,2 are the min & max bound in x,y,z for translation
        # Tuple 3,4,5 are the min & max bound in x,y,z for rotation
        # Tuple 6,7,8 are the min & max bound in x,y,z for scaling
        # For uniform scaling (similarity), tuple #6 is enough
        bounds_dof = [(-20, 20), (-20, 20), (-20, 20), (-10, 10), (-10, 10),
                      (-10, 10), (0.8, 1.2), (0.8, 1.2), (0.8, 1.2)]
        metric = BundleMinDistanceMetric(num_threads=slr_num_thread)
        slr_transform_type_id = possible_slr_transform_type[slr_transform_type]
        if slr_transform_type_id >= 0:
            init_transfo_dof = np.zeros(3)
            slr = StreamlineLinearRegistration(metric=metric,
                                               method="Powell",
                                               x0=init_transfo_dof,
                                               bounds=bounds_dof[:3],
                                               num_threads=slr_num_thread)
            slm = slr.optimize(static, moving)

        if slr_transform_type_id >= 1:
            init_transfo_dof = np.zeros(6)
            init_transfo_dof[:3] = slm.xopt

            slr = StreamlineLinearRegistration(metric=metric,
                                               x0=init_transfo_dof,
                                               bounds=bounds_dof[:6],
                                               num_threads=slr_num_thread)
            slm = slr.optimize(static, moving)

        if slr_transform_type_id >= 2:
            if slr_transform_type_id == 2:
                init_transfo_dof = np.zeros(7)
                init_transfo_dof[:6] = slm.xopt
                init_transfo_dof[6] = 1.

                slr = StreamlineLinearRegistration(metric=metric,
                                                   x0=init_transfo_dof,
                                                   bounds=bounds_dof[:7],
                                                   num_threads=slr_num_thread)
                slm = slr.optimize(static, moving)

            else:
                init_transfo_dof = np.zeros(9)
                init_transfo_dof[:6] = slm.xopt[:6]
                init_transfo_dof[6:] = np.array((slm.xopt[6], ) * 3)

                slr = StreamlineLinearRegistration(metric=metric,
                                                   x0=init_transfo_dof,
                                                   bounds=bounds_dof[:9],
                                                   num_threads=slr_num_thread)
                slm = slr.optimize(static, moving)
        self.model_centroids = transform_streamlines(self.model_centroids,
                                                     np.linalg.inv(slm.matrix))
Example #33
        #attach fa information
        native_target_streamlines = transform_streamlines(
            target_streamlines, np.linalg.inv(affine_fa))
        stream_fa = []
        stream_point_fa = []
        for s in range(len(native_target_streamlines)):
            point_fa = [
                fa[int(k[0]), int(k[1]), int(k[2])]
                for k in native_target_streamlines[s]
            ]
            stream_point_fa.append(point_fa)
            stream_fa.append(np.mean(point_fa))

        #registration
        srr = StreamlineLinearRegistration()
        srm = srr.optimize(static=target_clusters_control.centroids,
                           moving=target_clusters.centroids)
        target_str_aligned = srm.transform(target_streamlines)
        native_target_stream_aligned = transform_streamlines(
            target_str_aligned, np.linalg.inv(affine_fa))

        locals()['groupstreamlines' + str(k + 1)].extend(target_str_aligned)
        locals()['Nativegroupstreamlines' +
                 str(k + 1)].extend(native_target_stream_aligned)
        locals()['groupLinesFA' + str(k + 1)].extend(stream_fa)
        locals()['groupPointsFA' + str(k + 1)].extend(stream_point_fa)

        print('NO.' + str(j + 1) + ' ' + runno + " Nb. streamlines:",
              len(target_str_aligned))
Example #34
"""
An important step before running the registration is to resample the
streamlines so that they both have the same number of points per streamline.
Here we will use 20 points. This step is not optional. Inputting streamlines
with a different number of points will break the theoretical advantages of using
the SLR as explained in [Garyfallidis15]_.
"""

cb_subj1 = set_number_of_points(cb_subj1, 20)
cb_subj2 = set_number_of_points(cb_subj2, 20)
"""
Let's say now that we want to move the ``cb_subj2`` (moving) so that it can be
aligned with ``cb_subj1`` (static). Here is how this is done.
"""

srr = StreamlineLinearRegistration()

srm = srr.optimize(static=cb_subj1, moving=cb_subj2)
"""
After the optimization is finished we can apply the transformation to
``cb_subj2``.
"""

cb_subj2_aligned = srm.transform(cb_subj2)


def show_both_bundles(bundles, colors=None, show=False, fname=None):

    ren = fvtk.ren()
    ren.SetBackground(1., 1, 1)
    for (i, bundle) in enumerate(bundles):
Example #35
                    if np.mean(bundles_ref[ref][bun_num])==0:
                        empty_bundles[ref] += 1
                    worksheet.write(bun_num+1, l+0, np.mean(bundles_ref[ref][bun_num]))
                    worksheet.write(bun_num+1, l+1, np.min(bundles_ref[ref][bun_num]))
                    worksheet.write(bun_num+1, l+2, np.max(bundles_ref[ref][bun_num]))
                    worksheet.write(bun_num+1, l+3, np.std(bundles_ref[ref][bun_num]))
                    l = l + 4
                bun_num+=1
            workbook.close()
        for ref in references:
            if empty_bundles[ref]>0:
                print(f'Found {empty_bundles[ref]} empty bundles out of {np.size(top_bundles)} for {ref} in group {group} for {region_connection}')


    if registration:
        srr = StreamlineLinearRegistration()
        for i, streamline in enumerate(selected_centroids[non_control]):
            srm = srr.optimize(static=selected_centroids[control], moving=streamline)
            streamlines[control][i] = srm.transform(streamline)

    from dipy.segment.metric import ResampleFeature, AveragePointwiseEuclideanMetric, mdf

    #dist_all = np.zeros((np.size(selected_bundles[control]), np.size(selected_bundles[non_control])))
    dist_all = np.zeros((num_bundles, num_bundles))


    """
    top_idx_group_control = sorted(range(len(selected_sizes[control])),
                            key=lambda i: selected_sizes[group][i], reverse=True)[:num_bundles]
    top_idx_group_noncontrol = sorted(range(len(selected_sizes[non_control])),
                            key=lambda i: selected_sizes[group][i], reverse=True)[:num_bundles]
Example #36
"""
An important step before running the registration is to resample the
streamlines so that they both have the same number of points per streamline.
Here we will use 20 points. This step is not optional. Inputting streamlines
with a different number of points will break the theoretical advantages of using
the SLR as explained in [Garyfallidis15]_.
"""

cb_subj1 = set_number_of_points(cb_subj1, 20)
cb_subj2 = set_number_of_points(cb_subj2, 20)

"""
Let's say now that we want to move the ``cb_subj2`` (moving) so that it can be
aligned with ``cb_subj1`` (static). Here is how this is done.
"""

srr = StreamlineLinearRegistration()

srm = srr.optimize(static=cb_subj1, moving=cb_subj2)

"""
After the optimization is finished we can apply the transformation to
``cb_subj2``.
"""

cb_subj2_aligned = srm.transform(cb_subj2)


def show_both_bundles(bundles, colors=None, show=False, fname=None):

    ren = fvtk.ren()
    ren.SetBackground(1.0, 1, 1)
Example #37
    if rank > data['num_max']:
        data['num_max'] = rank
        data['ref_idx'] = i

print(data['num_max'], data['ref_idx'])

from dipy.align.streamlinear import StreamlineLinearRegistration
from dipy.tracking.streamline import set_number_of_points


ref_idx = data['ref_idx']
p_per_strm = 20

ref_vec = set_number_of_points(data['streamlines'][ref_idx], p_per_strm)

srr = StreamlineLinearRegistration()

for i, strm in enumerate(data['streamlines']):
    print('registering %d/%d' % (i, len(data['file']) - 1))
    print('# streamlines = %d' % len(strm))
    if len(strm) == 0 or i == ref_idx:
        print('skipping')
        continue
    mov_vec = set_number_of_points(strm, p_per_strm)
    srm = srr.optimize(static=ref_vec, moving=mov_vec)
    data['aligned_strms'].append(srm.transform(mov_vec))

from dipy.viz import fvtk
ren = fvtk.ren()
ren.SetBackground(1., 1, 1)
Example #38
def tractograms_slr(moving_tractogram, static_tractogram):

    table_filename = 'affine_dictionary.pickle'
    if isfile(table_filename):
        print("Retrieving past results from %s" % table_filename)
        table = pickle.load(open(table_filename, 'rb'))
    else:
        print("Creating a new table which will be saved in %s" %
              table_filename)
        table = {}

    moving_tractogram_basename = ntpath.basename(moving_tractogram)
    static_tractogram_basename = ntpath.basename(static_tractogram)
    key = tuple([(moving_tractogram_basename, static_tractogram_basename)])[0]

    if key in table:
        print("Affine already exists in %s" % table_filename)
        affine = table[moving_tractogram_basename,
                       static_tractogram_basename]['affine']
    else:
        print("Loading tractograms...")
        moving_tractogram = nib.streamlines.load(moving_tractogram)
        moving_tractogram = moving_tractogram.streamlines
        static_tractogram = nib.streamlines.load(static_tractogram)
        static_tractogram = static_tractogram.streamlines

        print("Set parameters as in Garyfallidis et al. 2015.")
        threshold_length = 40.0  # 50mm / 1.25
        qb_threshold = 16.0  # 20mm / 1.25
        nb_res_points = 20

        print("Performing QuickBundles of static tractogram and resampling...")
        st = np.array(
            [s for s in static_tractogram if len(s) > threshold_length],
            dtype=object)
        qb = QuickBundles(threshold=qb_threshold)
        st_clusters = [cluster.centroid for cluster in qb.cluster(st)]
        st_clusters = set_number_of_points(st_clusters, nb_res_points)

        print("Performing QuickBundles of moving tractogram and resampling...")
        mt = np.array(
            [s for s in moving_tractogram if len(s) > threshold_length],
            dtype=object)
        qb = QuickBundles(threshold=qb_threshold)
        mt_clusters = [cluster.centroid for cluster in qb.cluster(mt)]
        mt_clusters = set_number_of_points(mt_clusters, nb_res_points)

        print("Performing Linear Registration...")
        srr = StreamlineLinearRegistration()
        srm = srr.optimize(static=st_clusters, moving=mt_clusters)

        print(
            "Affine transformation matrix with Streamline Linear Registration:"
        )
        affine = srm.matrix
        print('%s' % affine)

        print("Fill the dictionary.")
        table[moving_tractogram_basename, static_tractogram_basename] = {
            'affine': affine
        }
        pickle.dump(table,
                    open(table_filename, 'wb'),
                    protocol=pickle.HIGHEST_PROTOCOL)

    return affine
    bounds = [(-20, 20), (-20, 20), (-20, 20),
              (-30, 30), (-30, 30), (-30, 30),
              (0.5, 1.5), (0.5, 1.5), (0.5, 1.5),
              (-1, 1), (-1, 1), (-1, 1)]

if not affine:
    
    x0 = np.array([0, 0, 0, 0, 0, 0.])
    # default is BundleMinDistanceFast, rigid and L-BFGS-B
    metric = BundleMinDistance()
    method = 'Powell'  # or 'L-BFGS-B'
    bounds = None
    #bounds = [(-20, 20), (-20, 20), (-20, 20),
    #          (-30, 30), (-30, 30), (-30, 30)]

srr = StreamlineLinearRegistration(metric=metric, x0=x0, bounds=bounds)

srm = srr.optimize(static=t_tract_tmp, moving=s_tract_tmp)

"""
After the optimization is finished we can apply the learned transformation to
``s_tract``.
"""

s_tract_aligned = srm.transform(s_tract)

save_tracks_dpy(s_tract_aligned, out_file)
print('Saved: ', out_file)

                
def show_both_bundles(bundles, colors=None, show=False, fname=None):