Example #1
def test_rb_reduction_mam():

    rb = RecoBundles(f, greater_than=0, clust_thr=10, verbose=True)

    rec_trans, rec_labels = rb.recognize(model_bundle=f2,
                                         model_clust_thr=5.,
                                         reduction_thr=10,
                                         reduction_distance='mam',
                                         slr=True,
                                         slr_metric='asymmetric',
                                         pruning_distance='mam')

    D = bundles_distances_mam(f2, f[rec_labels])

    # check if the bundle is recognized correctly
    if len(f2) == len(rec_labels):
        for row in D:
            assert_equal(row.min(), 0)

    refine_trans, refine_labels = rb.refine(model_bundle=f2,
                                            pruned_streamlines=rec_trans,
                                            model_clust_thr=5.,
                                            reduction_thr=10)

    D = bundles_distances_mam(f2, f[refine_labels])

    # check if the bundle is recognized correctly
    for row in D:
        assert_equal(row.min(), 0)
Example #2
def test_rb_clustermap():

    cluster_map = qbx_and_merge(f, thresholds=[40, 25, 20, 10])

    rb = RecoBundles(f, greater_than=0, less_than=1000000,
                     cluster_map=cluster_map, clust_thr=10)
    rec_trans, rec_labels = rb.recognize(model_bundle=f2,
                                         model_clust_thr=5.,
                                         reduction_thr=10)

    D = bundles_distances_mam(f2, f[rec_labels])

    # check if the bundle is recognized correctly
    if len(f2) == len(rec_labels):
        for row in D:
            assert_equal(row.min(), 0)

    refine_trans, refine_labels = rb.refine(model_bundle=f2,
                                            pruned_streamlines=rec_trans,
                                            model_clust_thr=5.,
                                            reduction_thr=10)

    D = bundles_distances_mam(f2, f[refine_labels])

    # check if the bundle is recognized correctly
    for row in D:
        assert_equal(row.min(), 0)
Example #5
def test_rb_disable_slr():

    rb = RecoBundles(f, greater_than=0, clust_thr=10)

    rec_trans, rec_labels = rb.recognize(model_bundle=f2,
                                         model_clust_thr=5.,
                                         reduction_thr=10,
                                         slr=False)

    D = bundles_distances_mam(f2, f[rec_labels])

    # check if the bundle is recognized correctly
    if len(f2) == len(rec_labels):
        for row in D:
            assert_equal(row.min(), 0)

    refine_trans, refine_labels = rb.refine(model_bundle=f2,
                                            pruned_streamlines=rec_trans,
                                            model_clust_thr=5.,
                                            reduction_thr=10)

    D = bundles_distances_mam(f2, f[refine_labels])

    # check if the bundle is recognized correctly
    for row in D:
        assert_equal(row.min(), 0)
Example #6
def Shannon_entropy(tract):
    '''
    compute the Shannon Entropy of a set of tracks as defined by Lauren:
    H(A) = (-1/|A|) * sum_i log( (1/|A|) * sum_j p(f_i|f_j) )
    where p(f_i|f_j) = exp(-d(f_i,f_j)**2 / theta)
    '''
    from dipy.tracking.distances import bundles_distances_mam
    import numpy as np
    # if the number of fibers is too large, just sample
    if len(tract) > 3500:
        #from dissimilarity_common import subset_furthest_first
        #prototype_idx = subset_furthest_first(tract, 500, bundles_distances_mam)
        #prototype = [tract[i] for i in prototype_idx]
        
        prototype_idx = np.random.permutation(tract.shape[0])[:3500]
        prototype = [tract[i] for i in prototype_idx]
        
        dm = bundles_distances_mam(prototype, prototype)
    else:
        dm = bundles_distances_mam(tract, tract)
    
    dm = np.array(dm, dtype =float)
    
    dm2 = dm**2
    
    A = len(dm)
    theta = 10.
    
    pr_all = np.exp((-dm**2)/theta)
    
    pr_i = (1./A) * np.array([sum(pr_all[i]) for i in np.arange(A)])
        
    entropy = (-1./A) * sum([np.log(pr_i[i]) for i in np.arange(A)])
    print(entropy)
    return entropy
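A minimal, self-contained sanity check of the entropy expression above, using two toy streamlines rather than real tractography (the arrays and the theta value below are illustrative assumptions, not part of the original code):

import numpy as np
from dipy.tracking.distances import bundles_distances_mam

# two toy "streamlines": short 3D polylines as float32 arrays, as DIPY expects
toy_tract = [np.array([[0, 0, 0], [1, 0, 0], [2, 0, 0]], dtype='float32'),
             np.array([[0, 1, 0], [1, 1, 0], [2, 1, 0]], dtype='float32')]

dm = np.array(bundles_distances_mam(toy_tract, toy_tract), dtype=float)
theta = 10.
pr_all = np.exp((-dm ** 2) / theta)   # p(f_i|f_j) = exp(-d(f_i,f_j)**2 / theta)
pr_i = pr_all.mean(axis=1)            # (1/|A|) * sum_j p(f_i|f_j)
entropy = -np.mean(np.log(pr_i))      # H(A)
print(entropy)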
Example #8
def load_cst(tracks_filename, cst_index_file, ext):
    from dipy.io.dpy import Dpy
    from dipy.io.pickles import load_pickle
    dpr_tracks = Dpy(tracks_filename, 'r')
    all_tracks=dpr_tracks.read_tracks()
    dpr_tracks.close()
    tracks_id = load_pickle(cst_index_file)

    cst = [all_tracks[i] for i in tracks_id]

    cst_ext = [all_tracks[i] for i in tracks_id]
    medoid_cst = []
    #len_dis = 250
    if ext:
        k = int(np.round(len(cst) * 1.2))  # k is used as an index below, so keep it an integer
        not_cst_fil = []
        min_len = min(len(i) for i in cst)
        #print 'min_len of cst', min_len
        min_len = min_len*2.2/3#2./3.2# - 20
        for i in np.arange(len(all_tracks)):
            if (i not in tracks_id) and (length(all_tracks[i]) > min_len):
                not_cst_fil.append(all_tracks[i])
        
        #for st in all_tracks:
        #    if (length(st)>=min_len) and (st not in cst):
        #        not_cst_fil.append(st)
                
        from dipy.segment.quickbundles import QuickBundles
        
        qb = QuickBundles(cst,200,18)
        
        medoid_cst = qb.centroids[0]
        
        med_notcst_dm = bundles_distances_mam([medoid_cst], not_cst_fil)
        med_cst_dm = bundles_distances_mam([medoid_cst], cst)
        
        cst_rad = med_cst_dm[0][np.argmax(med_cst_dm[0])]
        len_dis = cst_rad * 2.8/2.
        #print med_cst_dm
        #print cst_rad
        #print len_dis
        #k_indices which close to the medoid
        sort = np.argsort(med_notcst_dm,axis = 1)[0]
        #print sort[:k+1]
        while (k>0 and med_notcst_dm[0][sort[k]]>=len_dis):
            k = k - 1
            
        #print med_notcst_dm[0][sort[0:k]]    
        #print k
        #close_indices = np.argsort(cst_dm,axis = 1)[:,0:k][0]
        close_indices = sort[0:k]
        
        for idx in close_indices:
            cst_ext.append(not_cst_fil[idx])            
        
        return cst, cst_ext, medoid_cst

    return cst
Example #9
def test_bundles_distances_mam():
    xyz1A = np.array([[0, 0, 0], [1, 0, 0], [2, 0, 0], [3, 0, 0]],
                     dtype='float32')
    xyz2A = np.array([[0, 1, 1], [1, 0, 1], [2, 3, -2]], dtype='float32')
    xyz1B = np.array([[-1, 0, 0], [2, 0, 0], [2, 3, 0], [3, 0, 0]],
                     dtype='float32')
    tracksA = [xyz1A, xyz2A]
    tracksB = [xyz1B, xyz1A, xyz2A]
    for metric in ('avg', 'min', 'max'):
        pf.bundles_distances_mam(tracksA, tracksB, metric=metric)
Example #11
def test_whole_brain_slr():
    streams, hdr = nib.trackvis.read(get_fnames('fornix'))
    fornix = [s[0] for s in streams]

    f = Streamlines(fornix)
    f1 = f.copy()
    f2 = f.copy()

    # check translation
    f2._data += np.array([50, 0, 0])

    moved, transform, qb_centroids1, qb_centroids2 = whole_brain_slr(
            f1, f2, x0='affine', verbose=True, rm_small_clusters=2,
            greater_than=0, less_than=np.inf,
            qbx_thr=[5, 2, 1], progressive=False)

    # we can check the quality of registration by comparing the matrices
    # MAM streamline distances before and after SLR
    D12 = bundles_distances_mam(f1, f2)
    D1M = bundles_distances_mam(f1, moved)

    d12_minsum = np.sum(np.min(D12, axis=0))
    d1m_minsum = np.sum(np.min(D1M, axis=0))

    print("distances= ", d12_minsum, " ", d1m_minsum)

    assert_equal(d1m_minsum < d12_minsum, True)

    assert_array_almost_equal(transform[:3, 3], [-50, -0, -0], 2)

    # check rotation

    mat = compose_matrix44([0, 0, 0, 15, 0, 0])

    f3 = f.copy()
    f3 = transform_streamlines(f3, mat)

    moved, transform, qb_centroids1, qb_centroids2 = slr_with_qbx(
            f1, f3, verbose=False, rm_small_clusters=1, greater_than=20,
            less_than=np.inf, qbx_thr=[2],
            progressive=True)

    # we can also check the quality by looking at the decomposed transform

    assert_array_almost_equal(decompose_matrix44(transform)[3], -15, 2)

    moved, transform, qb_centroids1, qb_centroids2 = slr_with_qbx(
            f1, f3, verbose=False, rm_small_clusters=1, select_random=400,
            greater_than=20, less_than=np.inf, qbx_thr=[2],
            progressive=True)

    # we can also check the quality by looking at the decomposed transform

    assert_array_almost_equal(decompose_matrix44(transform)[3], -15, 2)
Example #12
def create_dataset_from_tractography(size1, size2, same=True):
    if same: assert(size2 >= size1)
    filename = 'data/tracks_dti_10K_linear.dpy'
    print "Loading", filename
    dpr = Dpy(filename, 'r')
    tractography = dpr.read_tracks()
    dpr.close()
    print len(tractography), "streamlines"
    print "Removing streamlines that are too short"
    tractography = filter(lambda x: len(x) > 20, tractography) # remove too short streamlines
    print len(tractography), "streamlines"    
    tractography = np.array(tractography, dtype=np.object)

    print "Creating two simulated tractographies of sizes", size1, "and", size2
    if same:
        ids = fft(tractography, k=max([size1, size2]), distance=bundles_distances_mam)
        tractography1 = tractography[ids[:size1]]
    else:
        # ids1 = np.random.permutation(len(tractography))[:size1]
        # ids1 = sff(tractography, k=size1, distance=bundles_distances_mam)
        ids1 = fft(tractography, k=size1, distance=bundles_distances_mam)
        tractography1 = tractography[ids1[:size1]]

    if same:
        tractography2 = tractography[ids[:size2]]
    else:
        # ids2 = np.random.permutation(len(tractography))[:size2]
        # ids2 = sff(tractography, k=size2, distance=bundles_distances_mam)
        ids2 = fft(tractography, k=size2, distance=bundles_distances_mam)
        tractography2 = tractography[ids2]
        
    print "Done."

    print "Computing the distance matrices for each tractography."
    dm1 = bundles_distances_mam(tractography1, tractography1)
    dm2 = bundles_distances_mam(tractography2, tractography2)

    print("Computing similarity matrices.")
    sigma2 = np.mean([np.median(dm1), np.median(dm2)]) ** 2.0
    print("sigma2 = %f" % sigma2)
    A = np.exp(-dm1 * dm1 / sigma2)
    B = np.exp(-dm2 * dm2 / sigma2)

    # Note: the optimization works even using distance instead of similarity:
    # A = dm1
    # B = dm2

    return A, B
Example #13
def test_visualize_map_2tracts(streamlines):
    distortion = False
    length_min = 20
    #from nibabel import trackvis
    #filename = 'data/HCP_subject124422_100Kseeds/tracks_dti_100K.trk'
    #print("Loading %s" % filename)
    #streamlines, header = trackvis.read(open(filename))
    
    print("Removing streamlines shorter than %s points and transforming to array." % length_min)
    streamlines = np.array(list(filter(lambda x: len(x) >= length_min, [s[0] for s in streamlines])), dtype=np.object)
    
    size_A = 100
    size_B = 20
    print("Size A: %s" % size_A)
    print("Size B: %s" % size_B)
    idx_A = np.random.permutation(len(streamlines))[:size_A]
    streamlines_A = streamlines[idx_A]
    idx_B = idx_A[:size_B] # np.random.permutation(len(streamlines))[:size_B]
    streamlines_B = streamlines[idx_B]

    if distortion:
        streamlines_A = np.array([transform(s) for s in streamlines_A], dtype=np.object)

    print("Computing 1NN mapping.")
    dm_AB = bundles_distances_mam(streamlines_A, streamlines_B)
    mappingAB_1nn = dm_AB.argmin(1)

    print("Visualizing streamlines and mapping.")
    visualize_map_2tracts(streamlines_A, streamlines_B, mappingAB_1nn, line='tube', color_A='auto', color_B='auto')
Example #14
def test_rb_slr_threads():

    rng_multi = np.random.RandomState(42)
    rb_multi = RecoBundles(f, greater_than=0, clust_thr=10,
                           rng=np.random.RandomState(42))
    rec_trans_multi_threads, _ = rb_multi.recognize(model_bundle=f2,
                                                    model_clust_thr=5.,
                                                    reduction_thr=10,
                                                    slr=True,
                                                    slr_num_threads=None)

    rb_single = RecoBundles(f, greater_than=0, clust_thr=10,
                            rng=np.random.RandomState(42))
    rec_trans_single_thread, _ = rb_single.recognize(model_bundle=f2,
                                              model_clust_thr=5.,
                                              reduction_thr=10,
                                              slr=True,
                                              slr_num_threads=1)

    D = bundles_distances_mam(rec_trans_multi_threads, rec_trans_single_thread)

    # check if the bundle is recognized correctly
    # multi-threading prevents an exact match
    for row in D:
        assert_almost_equal(row.min(), 0, decimal=4)
Example #15
def tracts_mapping1(tractography1, tractography2, loss_function, neighbour, iterations_anneal_now, pre_map_file):

    ann = [100, 200, 400, 600, 800, 1000]        
    iterations_anneal_pre = 0
    
    if iterations_anneal_now<=100:
        dm12 = bundles_distances_mam(tractography1, tractography2)
        mapping12_coregistration_1nn = np.argmin(dm12, axis=1)
    else:
        k = (iterations_anneal_now // 200) - 1  # integer division so k can index ann
        iterations_anneal_pre = ann[k]
        from dipy.io.pickles import load_pickle
        mapping12_coregistration_1nn = load_pickle(pre_map_file)
        
    iterations_anneal = iterations_anneal_now - iterations_anneal_pre
    
    print "Iteration: ", iterations_anneal_now, iterations_anneal_pre, iterations_anneal
    print "The previous coregistration gives a mapping12 with the following loss:"        
    loss_coregistration_1nn = loss_function(mapping12_coregistration_1nn)
    print "loss =", loss_coregistration_1nn

    #iterations_anneal = 100
    print "Simulated Annealing"
    np.random.seed(1) 
    initial_state =  mapping12_coregistration_1nn.copy()
    mapping12_best, energy_best = anneal(initial_state=initial_state, energy_function=loss_function, neighbour=neighbour, transition_probability=transition_probability, temperature=temperature_boltzmann, max_steps=iterations_anneal, energy_max=0.0, T0=200.0, log_every=1000)

    return mapping12_coregistration_1nn, loss_coregistration_1nn, mapping12_best, energy_best
Example #16
def init_prb_state_sparse(tract1, tract2, nearest = 10):
    '''
    probability distribution obtained by normalizing the streamline distances (row-wise)
    '''   
    
    dm12 = bundles_distances_mam(tract1, tract2)
    
    #print dm12
    
    cs_idxs = [dm12[i].argsort()[:nearest] for i in np.arange(len(tract1))] #chosen indices
    ncs_idxs = [dm12[i].argsort()[nearest:] for i in np.arange(len(tract1))] #not chosen indices

    size1 = len(tract1)
    for i in np.arange(size1):
        dm12[i][ncs_idxs[i]] = 0      
    
    '''
    test sparse optimization
    '''
    
    #print dm12
    
    
    from common_functions import normalize_sum_row_1
    prb = normalize_sum_row_1(dm12)
    
    #print prb
    
    return np.array(prb,dtype='float'), cs_idxs
Example #17
def test_rb_slr_threads():

    rng_multi = np.random.RandomState(42)
    rb_multi = RecoBundles(f, greater_than=0, clust_thr=10,
                           rng=np.random.RandomState(42))
    rec_trans_multi_threads, _ = rb_multi.recognize(model_bundle=f2,
                                                    model_clust_thr=5.,
                                                    reduction_thr=10,
                                                    slr=True,
                                                    num_threads=None)

    rb_single = RecoBundles(f, greater_than=0, clust_thr=10,
                            rng=np.random.RandomState(42))
    rec_trans_single_thread, _ = rb_single.recognize(model_bundle=f2,
                                                     model_clust_thr=5.,
                                                     reduction_thr=10,
                                                     slr=True,
                                                     num_threads=1)

    D = bundles_distances_mam(rec_trans_multi_threads, rec_trans_single_thread)

    # check if the bundle is recognized correctly
    # multi-threading prevents an exact match
    for row in D:
        assert_almost_equal(row.min(), 0, decimal=4)
Example #18
def test_whole_brain_slr():
    streams, hdr = nib.trackvis.read(get_data('fornix'))
    fornix = [s[0] for s in streams]

    f = Streamlines(fornix)
    f1 = f.copy()
    f2 = f.copy()

    # check translation
    f2._data += np.array([50, 0, 0])

    moved, transform, qb_centroids1, qb_centroids2 = whole_brain_slr(
            f1, f2, verbose=True, rm_small_clusters=2, greater_than=0,
            less_than=np.inf, qb_thr=5, progressive=False)

    # we can check the quality of registration by comparing the matrices
    # MAM streamline distances before and after SLR
    D12 = bundles_distances_mam(f1, f2)
    D1M = bundles_distances_mam(f1, moved)

    d12_minsum = np.sum(np.min(D12, axis=0))
    d1m_minsum = np.sum(np.min(D1M, axis=0))

    assert_equal(d1m_minsum < d12_minsum, True)

    assert_array_almost_equal(transform[:3, 3], [-50, -0, -0], 3)

    # check rotation
    mat = compose_matrix44([0, 0, 0, 15, 0, 0])

    f3 = f.copy()
    f3 = transform_streamlines(f3, mat)

    moved, transform, qb_centroids1, qb_centroids2 = slr_with_qb(
            f1, f3, verbose=False, rm_small_clusters=1, greater_than=20,
            less_than=np.inf, qb_thr=2, progressive=True)

    # we can also check the quality by looking at the decomposed transform
    assert_array_almost_equal(decompose_matrix44(transform)[3], -15, 2)

    moved, transform, qb_centroids1, qb_centroids2 = slr_with_qb(
            f1, f3, verbose=False, rm_small_clusters=1, select_random=400,
            greater_than=20,
            less_than=np.inf, qb_thr=2, progressive=True)

    # we can also check the quality by looking at the decomposed transform
    assert_array_almost_equal(decompose_matrix44(transform)[3], -15, 2)
Example #19
def bundles_distances_mam_smarter(A, B=None):
    """Smarter of bundles_distances_mam that avoids computing
    distances twice.
    """
    lenA = len(A)
    if B is None:
        dm = np.empty((lenA, lenA), dtype=np.float32)
        dm[np.diag_indices(lenA)] = 0.0
        for i, s in enumerate(A[:-1]):
            dm[i, i + 1:] = bundles_distances_mam([s], A[i + 1:])
            dm[i + 1:, i] = dm[i, i + 1:]

    else:
        lenB = len(B)
        dm = np.empty((lenA, lenB), dtype=np.float32)
        for i, s in enumerate(A):
            dm[i, :] = bundles_distances_mam([s], B)

    return dm
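A quick, self-contained check (toy float32 streamlines, chosen here purely for illustration) that the shortcut defined above matches a direct call for the default 'avg' metric, which is symmetric:

import numpy as np
from dipy.tracking.distances import bundles_distances_mam

A = [np.array([[0, 0, 0], [1, 0, 0], [2, 0, 0]], dtype='float32'),
     np.array([[0, 1, 0], [1, 1, 0], [2, 1, 0]], dtype='float32'),
     np.array([[0, 0, 1], [1, 0, 1], [2, 0, 1]], dtype='float32')]

dm_full = bundles_distances_mam(A, A)       # every pair computed twice
dm_half = bundles_distances_mam_smarter(A)  # only the upper triangle computed
np.testing.assert_array_almost_equal(dm_full, dm_half, decimal=5)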
Example #20
def test_bundles_distances_mam():
    xyz1A = np.array([[0, 0, 0], [1, 0, 0], [2, 0, 0], [3, 0, 0]],
                     dtype='float32')
    xyz2A = np.array([[0, 1, 1], [1, 0, 1], [2, 3, -2]], dtype='float32')
    xyz1B = np.array([[-1, 0, 0], [2, 0, 0], [2, 3, 0], [3, 0, 0]],
                     dtype='float32')
    tracksA = [xyz1A, xyz2A]
    tracksB = [xyz1B, xyz1A, xyz2A]

    with assert_warns(UserWarning):
        for metric in ('avg', 'min', 'max'):
            pf.bundles_distances_mam(tracksA, tracksB, metric=metric)

    with warnings.catch_warnings(record=True) as w:
        warnings.simplefilter("always", category=UserWarning)
        _ = pf.bundles_distances_mam(tracksA, tracksB)
        print(w)
        assert_true(len(w) == 1)
        assert_true(issubclass(w[0].category, UserWarning))
        assert_true("not have the same number of points" in str(w[0].message))
Example #21
def test_rb_check_defaults():

    rb = RecoBundles(f, clust_thr=10)
    rec_trans, rec_labels, recognized = rb.recognize(model_bundle=f2,
                                                     model_clust_thr=5.,
                                                     reduction_thr=10)
    D = bundles_distances_mam(f2, recognized)

    # check if the bundle is recognized correctly
    for row in D:
        assert_equal(row.min(), 0)
Example #22
def init_prb_state_1(tract1, tract2):
    '''
    probability distribution obtained by normalizing the streamline distances (row-wise)
    '''   
    
    dm12 = bundles_distances_mam(tract1, tract2)
        
    from common_functions import normalize_sum_row_1
    prb = normalize_sum_row_1(dm12)
    
    return np.array(prb,dtype='float')  
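normalize_sum_row_1 is imported from a local common_functions module that is not included in these examples; a minimal stand-in consistent with how it is used above (each row rescaled to sum to 1) could look like this:

import numpy as np

def normalize_sum_row_1(matrix):
    # rescale every row of a 2-D array so that it sums to 1
    matrix = np.asarray(matrix, dtype=float)
    row_sums = matrix.sum(axis=1, keepdims=True)
    row_sums[row_sums == 0] = 1.0  # guard against all-zero rows
    return matrix / row_sums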
Example #23
def test_rb_clustermap():

    cluster_map = qbx_and_merge(f, thresholds=[40, 25, 20, 10])

    rb = RecoBundles(f, cluster_map=cluster_map, clust_thr=10)
    rec_trans, rec_labels, recognized = rb.recognize(model_bundle=f2,
                                                     model_clust_thr=5.,
                                                     reduction_thr=10)
    D = bundles_distances_mam(f2, recognized)

    # check if the bundle is recognized correctly
    for row in D:
        assert_equal(row.min(), 0)
Example #24
def probability_map_new_ipl(tractography1, tractography2, size1, size2):
    
    print("Defining the initial flat probabilistic mapping, to be optimized.")
    P = np.ones((size1, size2))
    P = P / P.sum(1)[:,None]
    x0 = P.flatten()
    
    print "Computing the distance matrices for each tractography."
    dm1 = bundles_distances_mam(tractography1, tractography1)
    dm2 = bundles_distances_mam(tractography2, tractography2)

    print("Computing similarity matrices.")
    sigma2 = np.mean([np.median(dm1), np.median(dm2)]) ** 2.0
    print("sigma2 = %f" % sigma2)
    A = np.exp(-dm1 * dm1 / sigma2)
    B = np.exp(-dm2 * dm2 / sigma2)

    # Note: the optimization works even using distance instead of similarity:
    # A = dm1
    # B = dm2
    
    print("")
    print("Optimization...")
    print("iteration : Loss")
    t0 = time()
    xopt = fmin_powell(f, x0, args=(size1, size2, A, B), disp=True, full_output=False, maxiter=4, ftol=1.0e-4)
    print("Optimization done in %f secs." % (time() - t0))

    print("")
    # Popt = xopt.reshape(size1, size2)
    # mapping_opt = Popt.argmax(1)
    P_best = xopt.reshape(size1, size2)
    mapping_best = P_best.argmax(1)
    loss_best = f(xopt, size1, size2, A, B)  # re-evaluate the loss at the optimum found by fmin_powell
    print("Best Loss = %s" % loss_best)
    print("Best mapping = %s" % mapping_best)
    
    return mapping_best
Example #25
def greedy_optimization(dm_sA, dm_sA_mapping_B, dr_B, mapping_A_in_B, kdt, idxs_to_optimize, neighborhood_size=100, optimization_steps=1000, seed=0):
    np.random.seed(seed)
    print()
    print("Loss:", loss(dm_sA, dm_sA_mapping_B))
    print("Greedy optimization.")
    for n in range(optimization_steps):
        flag = False
        # print n
        # pick one streamline at random from sA:
        i = idxs_to_optimize[np.random.randint(len(idxs_to_optimize))]
        # retrieve d(s_i^A, s_j^A) for each j!=i :
        d_i_A = dm_sA[i,:]
        # retrieve d(s_{\phi(i)}^B, s__{\phi(j)}^B) for each j!=i :
        d_phii_B = dm_sA_mapping_B[i,:]
        # compute loss(i):
        tmp = d_i_A - d_phii_B
        partial_loss_i = (tmp * tmp).sum()
        # print "l(i):", partial_loss_i
        
        # retrieve a neighborhood of phi(i):   (phi(i) excluded)
        neighbors = kdt.query(dr_B[mapping_A_in_B[i]], k=neighborhood_size, return_distance=False).squeeze()[1:]
        # compute the change in loss when switching from phi(i) to each of the neighbors, and greedily keep just the improvements:
        best_partial_loss = partial_loss_i
        best_candidate = mapping_A_in_B[i]
        d_best_candidate_B = d_phii_B
        for candidate in neighbors:
            # computing new distances:
            d_candidate_B = bundles_distances_mam([streamlines_B[candidate]], streamlines_B[mapping_A_in_B]).squeeze()
            d_candidate_B[i] = 0.0 # fixing distance i with proper value
            # computing new partial loss:
            tmp = d_i_A - d_candidate_B
            l_candidate = (tmp * tmp).sum()
            # updating the best_candidate:
            if l_candidate < best_partial_loss:
                print "Improvement:", best_partial_loss - l_candidate
                best_partial_loss = l_candidate
                best_candidate = candidate
                d_best_candidate_B = d_candidate_B
                flag = True

        # If optimization happened, update mapping and dm_sA_mapping_B and compute new loss:
        if flag:
            mapping_A_in_B[i] = best_candidate
            dm_sA_mapping_B[i, :] = d_best_candidate_B
            dm_sA_mapping_B[:, i] = d_best_candidate_B

            print "Loss:", loss(dm_sA, dm_sA_mapping_B)

    return mapping_A_in_B, dm_sA_mapping_B
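The loss helper called above is defined elsewhere in the original script; judging from the per-streamline partial loss computed inside the loop, a consistent stand-in is the sum of squared differences between the two distance matrices:

import numpy as np

def loss(dm_source, dm_mapped):
    # sum of squared differences between two equally sized distance matrices
    diff = np.asarray(dm_source) - np.asarray(dm_mapped)
    return (diff * diff).sum()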
Example #26
def test_rb_no_verbose_and_mam():

    rb = RecoBundles(f, clust_thr=10, verbose=False)

    rec_trans, rec_labels, recognized = rb.recognize(model_bundle=f2,
                                                     model_clust_thr=5.,
                                                     reduction_thr=10,
                                                     slr=True,
                                                     pruning_distance='mam')

    D = bundles_distances_mam(f2, recognized)

    # check if the bundle is recognized correctly
    for row in D:
        assert_equal(row.min(), 0)
Example #27
def shannon_entropy(tract):
    '''
    compute the Shannon Entropy of a set of tracks as defined by Lauren:
    H(A) = (-1/|A|) * sum_i log( (1/|A|) * sum_j p(f_i|f_j) )
    where p(f_i|f_j) = exp(-d(f_i,f_j)**2 / theta)
    '''

    dm = bundles_distances_mam(tract, tract)
    dm = np.array(dm, dtype=float)
    A = len(tract)
    theta = 10.
    pr_all = np.exp((-dm**2) / theta)
    pr_i = (1. / A) * np.array([sum(pr_all[i]) for i in np.arange(A)])
    entropy = (-1. / A) * sum([np.log(pr_i[i]) for i in np.arange(A)])

    return entropy
Example #28
def shannon_entropy(tract):
    '''
    compute the Shannon Entropy of a set of tracks as defined by Lauren:
    H(A) = (-1/|A|) * sum_i log( (1/|A|) * sum_j p(f_i|f_j) )
    where p(f_i|f_j) = exp(-d(f_i,f_j)**2 / theta)
    '''

    dm = bundles_distances_mam(tract, tract)
    dm = np.array(dm, dtype =float)
    A = len(tract)
    theta = 10.
    pr_all = np.exp((-dm**2)/theta)    
    pr_i = (1./A) * np.array([sum(pr_all[i]) for i in np.arange(A)])
    entropy = (-1./A) * sum([np.log(pr_i[i]) for i in np.arange(A)])

    return entropy
Example #29
def tracts_mapping(tractography1, tractography2, loss_function, neighbour, iterations_anneal):
        
    print()
    print("The best coregistration+1NN gives a mapping12 with the following loss:")
    dm12 = bundles_distances_mam(tractography1, tractography2)
    mapping12_coregistration_1nn = np.argmin(dm12, axis=1)
    loss_coregistration_1nn = loss_function(mapping12_coregistration_1nn)
    print("loss =", loss_coregistration_1nn)

    #iterations_anneal = 100
    print "Simulated Annealing"
    np.random.seed(1) 
    initial_state =  mapping12_coregistration_1nn.copy()
    mapping12_best, energy_best = anneal(initial_state=initial_state, energy_function=loss_function, neighbour=neighbour, transition_probability=transition_probability, temperature=temperature_boltzmann, max_steps=iterations_anneal, energy_max=0.0, T0=200.0, log_every=1000)

    return mapping12_coregistration_1nn, loss_coregistration_1nn, mapping12_best, energy_best
Example #30
def load_or_create(subject, side, len_threshold=20, k=100, outdir='data_als/cache/', seed=0):
    filename = 'data_als/%d/tracks_dti_3M_linear.trk' % subject

    print "Loading", filename
    streamlines, header = read(filename)
    streamlines = np.array(streamlines, dtype=np.object)[:,0]

    # hd = md5(streamlines).hexdigest()
    # print "hexdigest:", hd

    filename_cst = 'data_als/%d/%d_corticospinal_%s_3M.pkl'
    filename_cst = filename_cst % (subject, subject_segmentation[subject], side)
    print "Loading CST", filename_cst
    cst_ids = np.load(filename_cst)
    # cst_streamlines = streamlines[cst_ids]

    print "Building the dissimilarity representation."
    try:
        filename_prototypes = outdir+'Pi_ids_%d_%s.npy' % (subject, side)
        print "Trying to load", filename_prototypes
        Pi_ids = np.load(filename_prototypes)
        print "Done."
    except IOError:
        print "Not found."
        print "Creating prototypes."
        lenghts = np.array([len(s) for s in streamlines])
        streamlines_long_ids = np.where(lenghts > len_threshold)[0] # using long streamlines heuristic
        distance = bundles_distances_mam
        np.random.seed(seed)
        Pi_ids = streamlines_long_ids[subset_furthest_first(streamlines[streamlines_long_ids], k=k, distance=distance)] # using long streamlines heuristic
        print "Saving", filename_prototypes
        np.save(filename_prototypes, Pi_ids)
        Pi = streamlines[Pi_ids]
        
    try:
        filename_dr = outdir+'dr_%d_%s.npy' % (subject, side)
        print "Trying to load", filename_dr
        dr = np.load(filename_dr)
        print "Done."
    except IOError:
        print "Not found."
        print "Computing the dissimilarity matrix."
        dr = bundles_distances_mam(streamlines, Pi).astype(np.float32)
        print "Saving", filename_dr
        np.save(filename_dr, dr.astype(np.float32))

    return streamlines, cst_ids, Pi_ids, dr
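subset_furthest_first comes from the authors' dissimilarity code and is not shown in this example; a simplified sketch of the furthest-first prototype selection it is assumed to implement (behaviour assumed, not the original implementation) is:

import numpy as np

def subset_furthest_first(tracks, k, distance, seed=0):
    # greedy furthest-first traversal: pick k well-spread prototype indices
    rng = np.random.RandomState(seed)
    selected = [rng.randint(len(tracks))]  # start from a random streamline
    for _ in range(k - 1):
        # distance of every streamline to its nearest already-selected prototype
        d = distance(tracks, [tracks[i] for i in selected]).min(axis=1)
        d[selected] = -np.inf  # never pick the same streamline twice
        selected.append(int(np.argmax(d)))
    return np.array(selected)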
Example #31
    def _reduce_search_space(self,
                             model_centroids,
                             reduction_thr=20,
                             reduction_distance='mdf'):
        if self.verbose:
            t = time()
            logger.info('# Reduce search space')
            logger.info(' Reduction threshold %0.3f' % (reduction_thr, ))
            logger.info(' Reduction distance {}'.format(reduction_distance))

        if reduction_distance.lower() == 'mdf':
            if self.verbose:
                logger.info(' Using MDF')
            centroid_matrix = bundles_distances_mdf(model_centroids,
                                                    self.centroids)
        elif reduction_distance.lower() == 'mam':
            if self.verbose:
                logger.info(' Using MAM')
            centroid_matrix = bundles_distances_mam(model_centroids,
                                                    self.centroids)
        else:
            raise ValueError('Given reduction distance not known')

        centroid_matrix[centroid_matrix > reduction_thr] = np.inf

        mins = np.min(centroid_matrix, axis=0)
        close_clusters_indices = list(np.where(mins != np.inf)[0])

        close_clusters = self.cluster_map[close_clusters_indices]

        neighb_indices = [cluster.indices for cluster in close_clusters]

        neighb_streamlines = Streamlines(chain(*close_clusters))

        nb_neighb_streamlines = len(neighb_streamlines)

        if nb_neighb_streamlines == 0:
            if self.verbose:
                logger.info('You have no neighbor streamlines... ' +
                            'No bundle recognition')
            return Streamlines([]), []
        if self.verbose:
            logger.info(' Number of neighbor streamlines %d' %
                        (nb_neighb_streamlines, ))
            logger.info(' Duration %0.3f sec. \n' % (time() - t, ))

        return neighb_streamlines, neighb_indices
Example #32
def compute_partial_cost(A, B, assignment):
  swap = False
  if len(A) > len(B):
      A, B = B, A
      swap = True

  costAB = np.zeros(len(A))
  for i, idx in enumerate(assignment):
      costAB[i] = bundles_distances_mam([A[i]], [B[idx]], metric='avg') / min(len(A[i]), len(B[idx]))

  cost01 = costAB
  if swap:
      cost01 = -np.ones(len(B))
      cost01[assignment] = costAB
      cost01[cost01 == -1] = costAB.max()

  return cost01
Example #33
def init_prb_state_sparse(tract1, tract2, nearest = 10):
    '''
    probability distribution obtained by normalizing the streamline distances (row-wise)
    '''   
    from dipy.tracking.distances import bundles_distances_mam
    
    dm12 = bundles_distances_mam(tract1, tract2)
    
    #print dm12
    
    cs_idxs = [dm12[i].argsort()[:nearest] for i in np.arange(len(tract1))] #chosen indices
    ncs_idxs = [dm12[i].argsort()[nearest:] for i in np.arange(len(tract1))] #not chosen indices

    size1 = len(tract1)
    
    for i in np.arange(size1):
        cs_idxs[i].sort()
        ncs_idxs[i].sort()
        dm12[i][ncs_idxs[i]] = 0      
    
    '''
    test sparse optimization
    '''
    #print cs_idxs
    #print dm12
    
    prb = np.zeros((size1,nearest))
 
    for i in np.arange(size1):
        prb[i] = dm12[i][cs_idxs[i]]
       
    from common_functions import normalize_sum_row_1
    prb = normalize_sum_row_1(prb)
    
    #print prb
    #stop
    return np.array(prb,dtype='float'),np.array(cs_idxs, dtype = 'float')   
Example #34
def Shannon_entropy(tract):
    '''
    compute the Shannon Entropy of a set of tracks as defined by Lauren:
    H(A) = (-1/|A|) * sum_i log( (1/|A|) * sum_j p(f_i|f_j) )
    where p(f_i|f_j) = exp(-d(f_i,f_j)**2 / theta)
    '''
    from dipy.tracking.distances import bundles_distances_mam
    import numpy as np
    dm = bundles_distances_mam(tract, tract)
    
    dm = np.array(dm, dtype =float)
    
    dm2 = dm**2
    
    A = len(tract)
    theta = 10.
    
    pr_all = np.exp((-dm**2)/theta)
    
    pr_i = (1./A) * np.array([sum(pr_all[i]) for i in np.arange(A)])
    
    #sum_all = np.sum(pr_i)
    #print pr_i
    #print sum_all
    #pr_i = (1./sum_all) * pr_i
    #print pr_i
    
    #entropy = (-1./A) * sum([pr_i[i]*np.log(pr_i[i]) for i in np.arange(A)])
    entropy = (-1./A) * sum([np.log(pr_i[i]) for i in np.arange(A)])
    #entropy = (-1.) * sum([pr_i[i]*(np.log2(pr_i[i])) for i in np.arange(A)])
    
    #print dm2
    #print pr_all
    #print pr_i
    
    return entropy
Example #35
    def _prune_what_not_in_model(self, model_centroids,
                                 transf_streamlines,
                                 neighb_indices,
                                 mdf_thr=5,
                                 pruning_thr=10,
                                 pruning_distance='mdf'):

        if pruning_thr < 0:
            print('Pruning_thr has to be greater or equal to 0')

        if self.verbose:
            print('# Prune streamlines using the MDF distance')
            print(' Pruning threshold %0.3f' % (pruning_thr,))
            print(' Pruning distance {}'.format(pruning_distance))
            t = time()

        thresholds = [40, 30, 20, 10, mdf_thr]
        rtransf_cluster_map = qbx_and_merge(transf_streamlines,
                                            thresholds, nb_pts=20,
                                            select_randomly=500000,
                                            rng=self.rng,
                                            verbose=self.verbose)

        if self.verbose:
            print(' QB Duration %0.3f sec. \n' % (time() - t, ))

        rtransf_centroids = rtransf_cluster_map.centroids

        if pruning_distance.lower() == 'mdf':
            if self.verbose:
                print(' Using MDF')
            dist_matrix = bundles_distances_mdf(model_centroids,
                                                rtransf_centroids)
        elif pruning_distance.lower() == 'mam':
            if self.verbose:
                print(' Using MAM')
            dist_matrix = bundles_distances_mam(model_centroids,
                                                rtransf_centroids)
        else:
            raise ValueError('Given pruning distance is not available')
        dist_matrix[np.isnan(dist_matrix)] = np.inf
        dist_matrix[dist_matrix > pruning_thr] = np.inf

        pruning_matrix = dist_matrix.copy()

        if self.verbose:
            print(' Pruning matrix size is (%d, %d)'
                  % pruning_matrix.shape)

        mins = np.min(pruning_matrix, axis=0)
        pruned_indices = [rtransf_cluster_map[i].indices
                          for i in np.where(mins != np.inf)[0]]
        pruned_indices = list(chain(*pruned_indices))
        pruned_streamlines = transf_streamlines[np.array(pruned_indices)]

        initial_indices = list(chain(*neighb_indices))
        final_indices = [initial_indices[i] for i in pruned_indices]
        labels = final_indices

        if self.verbose:
            msg = ' Number of centroids: %d'
            print(msg % (len(rtransf_centroids),))
            msg = ' Number of streamlines after pruning: %d'
            print(msg % (len(pruned_streamlines),))

        if len(pruned_streamlines) == 0:
            print(' You have removed all streamlines')
            return Streamlines([]), []

        if self.verbose:
            print(' Duration %0.3f sec. \n' % (time() - t, ))

        return pruned_streamlines, labels
Example #36
    filename = 'data/tracks_dti_10K_linear.dpy'
        
    dpr = Dpy(filename, 'r')
    tracks = dpr.read_tracks()
    dpr.close()
    tracks = np.array(tracks, dtype=np.object)

    size1 = 100
    size2 = 100
    tracks1 = tracks[np.random.permutation(len(tracks))[:size1]]
    tracks2 = tracks[np.random.permutation(len(tracks))[:size2]]
    # solution = np.random.permutation(len(tracks1))
    # tracks2 = tracks1[solution]
    # inverse_solution = np.argsort(solution)

    dm1 = bundles_distances_mam(tracks1, tracks1)
    dm2 = bundles_distances_mam(tracks2, tracks2)

    print "For each s1 and for each s2 we reorder their distances and create a mapping from the first ordered ids to the second ordered ids"
    print "Then we compute the loss of this mapping"
    print "This approach is suboptimal but should provide a very good guess."

    idx2_best = None
    loss_best = 100000000
    mapping12_best = None
    for i1 in range(size1):
        idx1 = np.argsort(dm1[i1])

        for i2 in range(size2):
            idx2 = np.argsort(dm2[i2])
            mapping12 = np.argsort(idx2[np.argsort(idx1)]) # this line is tricky and creates the mapping as desired. It works correctly because if tracks2 is just a reshuffling of tracks1 then it leads to loss=0, as expected.
Example #37
        "Removing streamlines shorter than %s points and transforming to array."
        % length_min)
    streamlines = np.array(list(filter(lambda x: len(x) >= length_min,
                                       [s[0] for s in streamlines])),
                           dtype=np.object)

    size_A = 100
    size_B = 20
    print("Size A: %s" % size_A)
    print("Size B: %s" % size_B)
    idx_A = np.random.permutation(len(streamlines))[:size_A]
    streamlines_A = streamlines[idx_A]
    idx_B = idx_A[:size_B]  # np.random.permutation(len(streamlines))[:size_B]
    streamlines_B = streamlines[idx_B]

    if distortion:
        streamlines_A = np.array([transform(s) for s in streamlines_A],
                                 dtype=np.object)

    print("Computing 1NN mapping.")
    dm_AB = bundles_distances_mam(streamlines_A, streamlines_B)
    mappingAB_1nn = dm_AB.argmin(1)

    print("Visualizing streamlines and mapping.")
    visualize(streamlines_A,
              streamlines_B,
              mappingAB_1nn,
              line='tube',
              color_A='auto',
              color_B='auto')
Example #38
def mapping_nn(tractography1, tractography2):
    #print 'Compute the 1-nn from source to target'
    dm12 = bundles_distances_mam(tractography1, tractography2)
    mapping12_coregistration_1nn = np.argmin(dm12, axis=1)    
    return mapping12_coregistration_1nn
Example #39

if __name__ == '__main__':

    distortion = False

    length_min = 20
    filename = 'data/HCP_subject124422_100Kseeds/tracks_dti_100K.trk'
    print("Loading %s" % filename)
    streamlines, header = trackvis.read(open(filename))
    print("Removing streamlines shorter than %s points and transforming to array." % length_min)
    streamlines = np.array(list(filter(lambda x: len(x) >= length_min, [s[0] for s in streamlines])), dtype=np.object)
    
    size_A = 100
    size_B = 20
    print("Size A: %s" % size_A)
    print("Size B: %s" % size_B)
    idx_A = np.random.permutation(len(streamlines))[:size_A]
    streamlines_A = streamlines[idx_A]
    idx_B = idx_A[:size_B] # np.random.permutation(len(streamlines))[:size_B]
    streamlines_B = streamlines[idx_B]

    if distortion:
        streamlines_A = np.array([transform(s) for s in streamlines_A], dtype=np.object)

    print("Computing 1NN mapping.")
    dm_AB = bundles_distances_mam(streamlines_A, streamlines_B)
    mappingAB_1nn = dm_AB.argmin(1)

    print("Visualizing streamlines and mapping.")
    visualize(streamlines_A, streamlines_B, mappingAB_1nn, line='tube', color_A='auto', color_B='auto')
Example #40
        cst_viz_A = fvtk.streamtube(cst_streamlines_A, fvtk.colors.red)
        cst_viz_B = fvtk.streamtube(cst_streamlines_B, fvtk.colors.blue)
        fvtk.add(r, cst_viz_A)
        fvtk.add(r, cst_viz_B)
        fvtk.show(r)
        
    if show:
        r = fvtk.ren()
        Pi_viz_A = fvtk.streamtube(streamlines_A[Pi_ids_A], fvtk.colors.red)
        fvtk.add(r, Pi_viz_A)
        Pi_viz_B = fvtk.streamtube(streamlines_B[Pi_ids_B], fvtk.colors.blue)
        fvtk.add(r, Pi_viz_B)
        fvtk.show(r)

    print "Computing the distance matrix between Pi_A streamlines."
    dm_Pi_A = bundles_distances_mam(streamlines_A[Pi_ids_A], streamlines_A[Pi_ids_A])
    print "Computing the distance matrix between Pi_B streamlines."
    dm_Pi_B = bundles_distances_mam(streamlines_B[Pi_ids_B], streamlines_B[Pi_ids_B])
    print "Loss:", loss(dm_Pi_A, dm_Pi_B)

    print "Computing KDTree on B with the dissimilarity representation."
    dr_dim = 100
    print "Using the first", dr_dim, "prototypes."
    kdt = KDTree(dr_B[:,:dr_dim])
    print "Computing the dissimilarity representation of prototypes A in B."
    dr_Pi_A_in_B = bundles_distances_mam(streamlines_A[Pi_ids_A], streamlines_B[Pi_ids_B])
    print "Computing the initial mapping."
    print "Retrieving the nearest-neighbors of prototypes A in B."
    Pi_ids_A_1nn_B = kdt.query(dr_Pi_A_in_B[:,:dr_dim], k=1, return_distance=False).squeeze()
    mapping_Pi = Pi_ids_A_1nn_B.copy()
    mapping_Pi_initial = mapping_Pi.copy()
Example #41
                cst_len = len(s_cst)
                
                pr_s = s_cst_sff_in_ext[-num_pro:]
                
                #Step 0: Source prototypes (pr_s) and mapped prototypes (in target tracts_extension) 
                #        (call mapped_pr_s_in_t) are aligned together        
                mapped_pr = map_all[-num_pro:]
                
                mapped_pr_s_in_t = [t_cst_ext[idx] for idx in mapped_pr]
                
                #Step 1: Compute the dissimilarity of source tract based on pr_s - called dis_s
                #       Compute the dissimilarity of target tract extension based on mapped_pr_s_in_t (mapped of pr_s in target extension)- called dis_t_ext
                
                dis_s = bundles_distances_mam(s_cst, pr_s)
                dis_t_ext = bundles_distances_mam(t_cst_ext, mapped_pr_s_in_t)

                #Step 3: Compute the kd-tree of target tract extension based on dis_t_ext - call kdt_t_ext
                kdt_t_ext = BallTree(dis_t_ext,leaf_size=30) # KDTree(dis_t_ext) 
                                             
            
                #Step 4: Segment the cst_s in the target using nearest neighbor of each fiber_source 
                #        in the space of the kdt_t_ext - result is nn_cst_s_in_t
        
                k = 1         
                dst_nn, idx_nn = kdt_t_ext.query(dis_s, k)

                #print 'Distance'
                #print dst_nn
                #print 'Index '
Example #42
    """Just NumPy with very compact broadcasting
    """
    dm = s1[:, None, :] - s2[None, :, :]
    dm = (dm * dm).sum(2)
    return 0.5 * (np.sqrt(dm.min(0)).mean() + np.sqrt(dm.min(1)).mean())

if __name__ == '__main__':
    
    filename ='data/tracks_dti_10K_linear.dpy'
        
    dpr = Dpy(filename, 'r')
    tracks = dpr.read_tracks()
    dpr.close()
    tracks = np.array(tracks, dtype=np.object) # [:100]

    dm = bundles_distances_mam(tracks, tracks)

    # Problem:
    # Given a streamline s1 find the most similar streamline s2!=s1
    # where similarity is in terms of similarity of the neighbourhoods
    # (in terms of distances) between s1 and s2.
    # Idea for a solution:
    # 0) sort distances of tracks from s0 = d0
    # 1) for each s1 in the neighbourhood of s0:
    # 1.1) sort distances of tracks from s1 = d1
    # 2) for each s2 in tracks:
    # 2.1) sort distances of tracks from s2 = d2
    # 2.2) tot = the Euclidean distance between d2 and d0
    # 2.3) for each s3 in the neighbourhood of s2, (closests first):
    # 2.3.1) sort distances of tracks from s3 = d3
    # 2.4.1) compute minimum the Euclidean distance between d3 and all d1s
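A rough, illustrative sketch of steps 0) to 2.2) of the idea outlined in the comments above, assuming the square distance matrix dm computed earlier in this example (the function name and the simple exhaustive ranking are chosen here for illustration, not taken from the original script):

import numpy as np

def most_similar_neighbourhood(dm, i0):
    # rank every other streamline by how similar its sorted distance
    # profile is to the sorted distance profile of streamline i0
    d0 = np.sort(dm[i0])                                      # step 0
    totals = np.array([np.linalg.norm(np.sort(dm[i2]) - d0)   # steps 2.1-2.2
                       for i2 in range(len(dm))])
    totals[i0] = np.inf  # exclude i0 itself
    return int(np.argmin(totals))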
Example #43
source_cst = load_tract(s_file,s_ind)

target_cst_ext = load_tract(t_file,t_ind)

print(len(source_cst), len(target_cst_ext))

tractography1 = source_cst[-num_pro:]
tractography2 = target_cst_ext[:num_pro]
#tractography2 = target_cst_ext[:num_pro*2]

print "Source", len(tractography1)
print "Target", len(tractography2)


#print "Computing the distance matrices for each tractography."
dm1 = bundles_distances_mam(tractography1, tractography1)
dm2 = bundles_distances_mam(tractography2, tractography2)

size1 = len(tractography1)
size2 = len(tractography2)

if vis:
    ren = fvtk.ren() 
    ren = visualize_tract(ren, tractography1, fvtk.yellow)
    ren = visualize_tract(ren, tractography2, fvtk.blue)
    fvtk.show(ren)
  

y_dm1 = dm1
x_dm2 = dm2
L = []
Example #44
def test_whole_brain_slr():
    fname = get_fnames('fornix')

    fornix = load_tractogram(fname, 'same', bbox_valid_check=False).streamlines

    f = Streamlines(fornix)
    f1 = f.copy()
    f2 = f.copy()

    # check translation
    f2._data += np.array([50, 0, 0])

    moved, transform, qb_centroids1, qb_centroids2 = whole_brain_slr(
        f1,
        f2,
        x0='affine',
        verbose=True,
        rm_small_clusters=2,
        greater_than=0,
        less_than=np.inf,
        qbx_thr=[5, 2, 1],
        progressive=False)

    # we can check the quality of registration by comparing the matrices
    # MAM streamline distances before and after SLR
    D12 = bundles_distances_mam(f1, f2)
    D1M = bundles_distances_mam(f1, moved)

    d12_minsum = np.sum(np.min(D12, axis=0))
    d1m_minsum = np.sum(np.min(D1M, axis=0))

    print("distances= ", d12_minsum, " ", d1m_minsum)

    assert_equal(d1m_minsum < d12_minsum, True)

    assert_array_almost_equal(transform[:3, 3], [-50, -0, -0], 2)

    # check rotation

    mat = compose_matrix44([0, 0, 0, 15, 0, 0])

    f3 = f.copy()
    f3 = transform_streamlines(f3, mat)

    moved, transform, qb_centroids1, qb_centroids2 = slr_with_qbx(
        f1,
        f3,
        verbose=False,
        rm_small_clusters=1,
        greater_than=20,
        less_than=np.inf,
        qbx_thr=[2],
        progressive=True)

    # we can also check the quality by looking at the decomposed transform

    assert_array_almost_equal(decompose_matrix44(transform)[3], -15, 2)

    moved, transform, qb_centroids1, qb_centroids2 = slr_with_qbx(
        f1,
        f3,
        verbose=False,
        rm_small_clusters=1,
        select_random=400,
        greater_than=20,
        less_than=np.inf,
        qbx_thr=[2],
        progressive=True)

    # we can also check the quality by looking at the decomposed transform

    assert_array_almost_equal(decompose_matrix44(transform)[3], -15, 2)
Example #45
    print "streamline_min_size =", streamline_min_size
    print "streamline_max_size =", streamline_max_size

    B1 = []
    for i in range(B1_size):
        n1 = np.random.randint(streamline_min_size, streamline_max_size)
        B1.append(np.random.random((n1, 3)))

    B2 = []
    for i in range(B2_size):
        n2 = np.random.randint(streamline_min_size, streamline_max_size)
        B2.append(np.random.random((n2, 3)))

    print "DiPy's bundles_distances_mam():",
    t0 = time()
    mam_dm_dipy = bundles_distances_mam(B1, B2, metric='avg')
    print time() - t0 , 'sec.'

    dms = []
    for k, distance in enumerate(distances):
        print(k, ')', distance.__doc__.strip(), ':', end=' ')
        t0 = time()
        dm = np.zeros((len(B1), len(B2)))
        for i in range(len(B1)):
            for j in range(len(B2)):
                dm[i, j] = distance(B1[i], B2[j])

        print(time() - t0, 'sec.')
        dms.append(dm)
        if len(dms) > 1:
            np.testing.assert_almost_equal(dms[-1], dms[-2], decimal=5)
Example #46
    def _prune_what_not_in_model(self,
                                 model_centroids,
                                 transf_streamlines,
                                 neighb_indices,
                                 mdf_thr=5,
                                 pruning_thr=10,
                                 pruning_distance='mdf'):

        if pruning_thr < 0:
            print('Pruning_thr has to be greater or equal to 0')

        if self.verbose:
            print('# Prune streamlines using the MDF distance')
            print(' Pruning threshold %0.3f' % (pruning_thr, ))
            print(' Pruning distance {}'.format(pruning_distance))
            t = time()

        thresholds = [40, 30, 20, 10, mdf_thr]
        rtransf_cluster_map = qbx_and_merge(transf_streamlines,
                                            thresholds,
                                            nb_pts=20,
                                            select_randomly=500000,
                                            rng=self.rng,
                                            verbose=self.verbose)

        if self.verbose:
            print(' QB Duration %0.3f sec. \n' % (time() - t, ))

        rtransf_centroids = rtransf_cluster_map.centroids

        if pruning_distance.lower() == 'mdf':
            if self.verbose:
                print(' Using MDF')
            dist_matrix = bundles_distances_mdf(model_centroids,
                                                rtransf_centroids)
        elif pruning_distance.lower() == 'mam':
            if self.verbose:
                print(' Using MAM')
            dist_matrix = bundles_distances_mam(model_centroids,
                                                rtransf_centroids)
        else:
            raise ValueError('Given pruning distance is not available')
        dist_matrix[np.isnan(dist_matrix)] = np.inf
        dist_matrix[dist_matrix > pruning_thr] = np.inf

        pruning_matrix = dist_matrix.copy()

        if self.verbose:
            print(' Pruning matrix size is (%d, %d)' % pruning_matrix.shape)

        mins = np.min(pruning_matrix, axis=0)
        pruned_indices = [
            rtransf_cluster_map[i].indices for i in np.where(mins != np.inf)[0]
        ]
        pruned_indices = list(chain(*pruned_indices))
        idx = np.array(pruned_indices)
        if len(idx) == 0:
            print(' You have removed all streamlines')
            return Streamlines([]), []

        pruned_streamlines = transf_streamlines[idx]

        initial_indices = list(chain(*neighb_indices))
        final_indices = [initial_indices[i] for i in pruned_indices]
        labels = final_indices

        if self.verbose:
            msg = ' Number of centroids: %d'
            print(msg % (len(rtransf_centroids), ))
            msg = ' Number of streamlines after pruning: %d'
            print(msg % (len(pruned_streamlines), ))

        if self.verbose:
            print(' Duration %0.3f sec. \n' % (time() - t, ))

        return pruned_streamlines, labels
Example #47
            subTract[j], subHdr[j] = trackvis.read(T_filename,
                                                   as_generator=False)
            subTract[j] = np.array([s[0] for s in subTract[j]],
                                   dtype=np.object)
            length1 = length1 + len(subTract[j])
            tractLen.append(len(subTract[j]))
            # print(length1)
            s = np.concatenate((s, subTract[j]), axis=0)

        print(len(s))
        print("Tract Length")
        print(tractLen)
        #print(s[0:5])
        #DM = bundles_distances_mam(s , wholeTract )
        #print(DM[0:10])
        DM = bundles_distances_mam(s.tolist(), wholeTract.tolist())
        print("Length of distance Matrix")
        print(len(DM))
        #print(DM[0])
        minVal = [[0 for x in range(2)] for y in range(len(s))]
        minVal[i].remove(0)
        minVal[i].remove(0)
        wholeTract = 0
        for l in range(0, len(s)):

            m, k = min((v, i) for i, v in enumerate(DM[l]))
            print(m, k)
            minVal[i].append(k)
            #print("Minimum loop ")
            #print(minVal[i])