Example #1
def getLengths(streamlines):
    from dipy.tracking.utils import length
    # Materialize the generator once and reuse it for the stats below.
    lengths = list(length(streamlines))

    nb_stl = len(streamlines)
    min_len = min(lengths)
    max_len = max(lengths)

    print('Nb. streamlines:', nb_stl)
    print('Min length:', min_len)
    print('Max length:', max_len)

    return lengths
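
# A minimal usage sketch (hypothetical toy input: each streamline is an
# (N, 3) array of points in mm):
import numpy as np

toy_streamlines = [
    np.array([[0., 0., 0.], [1., 0., 0.], [2., 0., 0.]]),  # length 2.0
    np.array([[0., 0., 0.], [0., 3., 0.]]),                # length 3.0
]
toy_lengths = getLengths(toy_streamlines)  # prints count 2, min 2.0, max 3.0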
Example #2
def filterLength(streamlines, thr_len):
    # Keep only the streamlines strictly longer than thr_len (in mm).
    # Thresholds previously tried: 3.5, 2.5.
    new_streamlines = [s for s, l in zip(streamlines, getLengths(streamlines))
                       if l > thr_len]

    print('Nb. new streamlines:', len(new_streamlines))

    return new_streamlines
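
# A hypothetical call, assuming `streamlines` is already loaded; the
# 2.0 mm threshold is arbitrary:
long_streamlines = filterLength(streamlines, thr_len=2.0)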
Example #3
import numpy as np
from numpy.testing import assert_equal
from dipy.tracking.utils import length
import dipy.tracking.metrics as metrix


def test_length():
    # Generate a simulated bundle of fibers:
    n_streamlines = 50
    n_pts = 100
    t = np.linspace(-10, 10, n_pts)

    bundle = []
    for i in np.linspace(3, 5, n_streamlines):
        pts = np.vstack((np.cos(2 * t / np.pi), np.zeros(t.shape) + i, t)).T
        bundle.append(pts)

    start = np.random.randint(10, 30, n_streamlines)
    end = np.random.randint(60, 100, n_streamlines)

    bundle = [10 * streamline[start[i]:end[i]] for (i, streamline) in
              enumerate(bundle)]

    bundle_lengths = length(bundle)
    for idx, this_length in enumerate(bundle_lengths):
        assert_equal(this_length, metrix.length(bundle[idx]))
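
# For reference, dipy.tracking.utils.length returns the arc length of each
# streamline: the sum of the Euclidean norms of consecutive point
# differences. A minimal NumPy equivalent for one streamline (a sketch,
# not dipy's implementation):
def arc_length(xyz):
    # xyz: (N, 3) array of points; sum the lengths of the N-1 segments.
    return np.sqrt((np.diff(xyz, axis=0) ** 2).sum(axis=1)).sum()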
def filterlength(dname, fdwi, ffa, ftrk, thr_length, show=False):

    fa_img = nib.load(ffa)
    fa = fa_img.get_data()
    affine = fa_img.get_affine()

    img = nib.load(fdwi)
    data = img.get_data()

    from nibabel import trackvis
    streams, hdr = trackvis.read(ftrk)
    streamlines = [s[0] for s in streams]

    # threshold on streamline length (alternative threshold tried: 3.5)
    from dipy.tracking.utils import length
    lengths = list(length(streamlines))

    new_streamlines = [s for s, l in zip(streamlines, lengths) if l > thr_length]

    # report streamline counts and length ranges before/after filtering
    new_lengths = list(length(new_streamlines))
    print('Nb. streamlines:', len(streamlines))
    print('Nb. new streamlines:', len(new_streamlines))
    print('Length range before:', min(lengths), '-', max(lengths))
    print('Length range after:', min(new_lengths), '-', max(new_lengths))

    # save and show the filtered tractogram
    fnew_tractogram = dname + 'filteredtractogram.trk'
    save_trk_old_style(fnew_tractogram, new_streamlines, affine, fa.shape)

    if show:
        show_results(data, new_streamlines, fa, affine, opacity=0.6)
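
# A hypothetical invocation (all paths are placeholders; save_trk_old_style
# and show_results are project helpers defined elsewhere):
filterlength('out/', 'dwi.nii.gz', 'fa.nii.gz', 'tractogram.trk',
             thr_length=40.0, show=False)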
# Make a corpus callosum seed mask for tracking
seed_mask = labels == 2
seeds = utils.seeds_from_mask(seed_mask, density=[1, 1, 1], affine=affine)
# Make a streamline bundle model of the corpus callosum ROI connectivity
streamlines = LocalTracking(csa_peaks, classifier, seeds, affine,
                            step_size=2)
streamlines = Streamlines(streamlines)


"""
We do not want our results inflated by short streamlines, so we remove
streamlines shorter than 40mm prior to calculating the CCI.
"""

lengths = list(length(streamlines))
long_streamlines = Streamlines()
for i, sl in enumerate(streamlines):
    if lengths[i] > 40:
        long_streamlines.append(sl)


"""
Now we calculate the Cluster Confidence Index using the corpus callosum
streamline bundle and visualize them.
"""


cci = cluster_confidence(long_streamlines)

# Visualize the streamlines, colored by cci
Example #6
    return bundle


bundle = simulated_bundles()

print("This bundle has %d streamlines" % len(bundle))

"""
This bundle has 50 streamlines.

Using the ``length`` function we can retrieve the lengths of each streamline.
Below we show the histogram of the lengths of the streamlines.
"""

lengths = list(length(bundle))

import matplotlib.pyplot as plt

fig_hist, ax = plt.subplots(1)
ax.hist(lengths, color="burlywood")
ax.set_xlabel("Length")
ax.set_ylabel("Count")
fig_hist.savefig("length_histogram.png")
plt.show()

"""
.. figure:: length_histogram.png
   :align: center
def compute_correlation(data, distance, prototype_policies, num_prototypes,
                        iterations, verbose=False, size_limit=1000):
    global tracks_t, ids_l, data_original, tracks_s, id_t, tracks_n, a_ind, tracks_subsample
    print("Computing distance matrix and similarity matrix (original space):")
    data_original = data
    if data.shape[0] > size_limit:
        print("Dataset too big: subsampling to %s entries only!" % size_limit)
        data = data[np.random.permutation(data.shape[0])[:size_limit], :]
    od = distance(data, data)
    print(od.shape)
    original_distances = squareform(od)

    # --- custom filtering: keep the longest streamlines above 10 mm ---
    affine = utils.affine_for_trackvis(voxel_size=np.array([2, 2, 2]))

    lengths = np.array(list(length(data_original)))

    temp = np.where(lengths > 10)[0]
    l = np.argsort(lengths)[::-1][:len(temp)]
    data_original_temp = data_original[l]
    a = streamline_mapping_new_step(data_original_temp, affine=affine)
    tracks_subsample = data_original_temp[a]

    print(len(tracks_subsample))
    # --- end custom filtering ---
   
    rho = np.zeros((len(prototype_policies), len(num_prototypes), iterations))

    for m, prototype_policy in enumerate(prototype_policies):
        print(prototype_policy)
        for j, num_proto in enumerate(num_prototypes):
            print("number of prototypes:", num_proto, " - ", end="")
            for k in range(iterations):
                print(k, end=" ")
                stdout.flush()
                if verbose:
                    print("Generating %s prototypes as" % num_proto, end=" ")
                # Note that we use the original dataset here, not the subsampled one!
                if prototype_policy == 'random':
                    if verbose:
                        print("random subset of the initial data.")
                    prototype_idx = np.random.permutation(data_original.shape[0])[:num_proto]
                    prototype = [data_original[i] for i in prototype_idx]
                elif prototype_policy == 'sff':
                    prototype_idx = subset_furthest_first(data_original, num_proto, distance)
                    prototype = [data_original[i] for i in prototype_idx]
                elif prototype_policy == 'fft':
                    prototype_idx = furthest_first_traversal(tracks_subsample, num_proto, distance)
                    prototype = [tracks_subsample[i] for i in prototype_idx]
                else:
                    raise Exception("Unknown prototype policy: %s" % prototype_policy)

                if verbose:
                    print("Computing dissimilarity matrix.")
                data_dissimilarity = distance(data, prototype)

                if verbose:
                    print("Computing distance matrix (dissimilarity space).")
                dissimilarity_distances = pdist(data_dissimilarity, metric='euclidean')

                rho[m, j, k] = correlation(original_distances, dissimilarity_distances)[0]
            print()
    return rho
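
# A hypothetical driver, assuming DIPY's bundles_distances_mam as the
# streamline distance and `tracks` loaded as in the snippet below; the
# policies and prototype counts are illustrative:
from dipy.tracking.distances import bundles_distances_mam

rho = compute_correlation(tracks, bundles_distances_mam,
                          prototype_policies=['random', 'sff', 'fft'],
                          num_prototypes=[10, 20, 40],
                          iterations=5)
print(rho.mean(axis=-1))  # mean correlation per (policy, num_prototypes)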
"""
Reading the track
"""
streams1, hdr1 = nib.trackvis.read('/home/nusrat/dataset_trackvis/101.trk')
tracks = np.array([s[0] for s in streams1], dtype=np.object)

"""
affine for converting the streamline coordinates to voxel coordinates
"""
affine = utils.affine_for_trackvis(voxel_size=np.array([2, 2, 2]))

"""
length function from dipy
https://github.com/nipy/dipy/blob/master/dipy/tracking/_utils.py
"""
lengths = list(length(tracks))

"""
filter the whole tractography -- e.g., we are not interested in tracks shorter than 10.
"""
lengths = np.array(lengths)
l = np.where(lengths > 10)[0]

"""
rearrange the tractography
"""
tracks = tracks[l]

"""
check whether there is any crossing between them -- call the function that returns the ids of the streamlines that do not cross
"""
    return T, hdr


if __name__ == '__main__':

    T_A_filename = 'F:\\Thesis\\data\\100307\\100307_af.left.trk'
    T_A, hdr = loadtrkfile(T_A_filename, threshold_short_streamlines=10.0)

    T_A = T_A.tolist()
    T_A_filename2 = 'F:\\Thesis\\data\\124422\\124422_af.left.trk'
    # T_A_filename2 = 'F:\\Thesis\\data\\100307\\100307_af.right.trk'
    T_A2, hdr = loadtrkfile(T_A_filename2, threshold_short_streamlines=10.0)
    T_A2 = T_A2.tolist()

    dis = mam_distances(T_A[0], T_A2[0], metric='avg')

    print("Track Distance .. ")

    lengths = list(length(T_A))
    fig_hist, ax = plt.subplots()

    ax.plot(dis)
    plt.show()
    print(dis)
Example #10
We will use a slice of the anatomically-based corpus callosum ROI as our
seed mask to demonstrate the method.
 """

# Make a corpus callosum seed mask for tracking
seed_mask = labels == 2
seeds = utils.seeds_from_mask(seed_mask, density=[1, 1, 1], affine=affine)
# Make a streamline bundle model of the corpus callosum ROI connectivity
streamlines = LocalTracking(csa_peaks, classifier, seeds, affine, step_size=2)
streamlines = Streamlines(streamlines)
"""
We do not want our results inflated by short streamlines, so we remove
streamlines shorter than 40mm prior to calculating the CCI.
"""

lengths = list(length(streamlines))
long_streamlines = Streamlines()
for i, sl in enumerate(streamlines):
    if lengths[i] > 40:
        long_streamlines.append(sl)
"""
Now we calculate the Cluster Confidence Index using the corpus callosum
streamline bundle and visualize them.
"""

cci = cluster_confidence(long_streamlines)

# Visualize the streamlines, colored by cci
ren = window.Renderer()

hue = [0.5, 1]
Example #11
def compute_measures(args):
    filename_tuple, no_uniformize = args
    sft = load_tractogram(filename_tuple[0], filename_tuple[1])
    _, dimensions, voxel_size, _ = sft.space_attributes
    if not no_uniformize:
        uniformize_bundle_sft(sft)
    nbr_streamlines = len(sft)
    if not nbr_streamlines:
        logging.warning('{} is empty'.format(filename_tuple[0]))
        return dict(zip(['volume', 'volume_endpoints', 'streamlines_count',
                         'avg_length', 'std_length', 'min_length',
                         'max_length', 'span', 'curl', 'diameter',
                         'elongation', 'surface_area', 'end_surface_area_head',
                         'end_surface_area_tail', 'radius_head', 'radius_tail',
                         'irregularity', 'irregularity_of_end_surface_head',
                         'irregularity_of_end_surface_tail', 'mean_curvature',
                         'fractal_dimension'],
                        [0] * 21))

    streamline_cords = list(sft.streamlines)
    length_list = list(length(streamline_cords))
    length_avg = float(np.average(length_list))
    length_std = float(np.std(length_list))
    length_min = float(np.min(length_list))
    length_max = float(np.max(length_list))

    sft.to_vox()
    sft.to_corner()
    streamlines = sft.streamlines
    density = compute_tract_counts_map(streamlines, dimensions)
    endpoints_density = get_endpoints_density_map(streamlines, dimensions)

    span_list = list(map(compute_span, streamline_cords))
    span = float(np.average(span_list))
    curl = length_avg / span
    volume = np.count_nonzero(density) * np.prod(voxel_size)
    diameter = 2 * np.sqrt(volume / (np.pi * length_avg))
    elon = length_avg / diameter

    roi = np.where(density != 0, 1, density)
    surf_area = approximate_surface_node(roi) * (voxel_size[0] ** 2)
    irregularity = surf_area / (np.pi * diameter * length_avg)

    endpoints_map_head, endpoints_map_tail = \
        get_head_tail_density_maps(sft.streamlines, dimensions)
    endpoints_map_head_roi = \
        np.where(endpoints_map_head != 0, 1, endpoints_map_head)
    endpoints_map_tail_roi = \
        np.where(endpoints_map_tail != 0, 1, endpoints_map_tail)
    end_sur_area_head = \
        approximate_surface_node(endpoints_map_head_roi) * (voxel_size[0] ** 2)
    end_sur_area_tail = \
        approximate_surface_node(endpoints_map_tail_roi) * (voxel_size[0] ** 2)

    endpoints_coords_head = np.array(np.where(endpoints_map_head_roi)).T
    endpoints_coords_tail = np.array(np.where(endpoints_map_tail_roi)).T
    radius_head = 1.5 * np.average(
        np.sqrt(((endpoints_coords_head - np.average(
            endpoints_coords_head, axis=0))
            ** 2).sum(axis=1)))
    radius_tail = 1.5 * np.average(
        np.sqrt(((endpoints_coords_tail - np.average(
            endpoints_coords_tail, axis=0))
            ** 2).sum(axis=1)))
    end_irreg_head = (np.pi * radius_head ** 2) / end_sur_area_head
    end_irreg_tail = (np.pi * radius_tail ** 2) / end_sur_area_tail

    fractal_dimension = compute_fractal_dimension(density)

    curvature_list = np.zeros((nbr_streamlines,))
    for i in range(nbr_streamlines):
        curvature_list[i] = mean_curvature(sft.streamlines[i])

    return dict(zip(['volume', 'volume_endpoints', 'streamlines_count',
                     'avg_length', 'std_length', 'min_length', 'max_length',
                     'span', 'curl', 'diameter', 'elongation', 'surface_area',
                     'end_surface_area_head', 'end_surface_area_tail',
                     'radius_head', 'radius_tail',
                     'irregularity', 'irregularity_of_end_surface_head',
                     'irregularity_of_end_surface_tail', 'mean_curvature',
                     'fractal_dimension'],
                    [volume, np.count_nonzero(endpoints_density) *
                     np.prod(voxel_size), nbr_streamlines,
                     length_avg, length_std, length_min, length_max,
                     span, curl, diameter, elon, surf_area, end_sur_area_head,
                     end_sur_area_tail, radius_head, radius_tail, irregularity,
                     end_irreg_head, end_irreg_tail,
                     float(np.mean(curvature_list)), fractal_dimension]))
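# A hypothetical call (the file names are placeholders; the tuple layout
# matches the `args` unpacking above):
measures = compute_measures((('bundle.trk', 'anat.nii.gz'), False))
print(measures['streamlines_count'], measures['avg_length'])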
Example #12
def fib_lengths_count(stream):
    # length() returns a generator of per-streamline lengths.
    return length(stream)
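
# Since the result is a generator, materialize it before iterating more than
# once; a minimal sketch, assuming `streamlines` is already loaded:
lengths = list(fib_lengths_count(streamlines))
print(min(lengths), max(lengths))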
#ftractogram = dname + 'tractogram.trk'

#save .trk
#save_trk_old_style(ftractogram, streamlines, affine, fa.shape)

#render
show_results(streamlines, fa, fa_affine)


# In[18]:

# threshold on streamline length

from dipy.tracking.utils import length
lengths = list(length(streamlines))

new_streamlines = [s for s, l in zip(streamlines, lengths) if l > 2.]  # alternative threshold: 3.5


# In[19]:

# info length streamlines

print(len(streamlines))
print(len(new_streamlines))

print(max(length(streamlines)))
print(min(length(streamlines)))

print(max(length(new_streamlines)))
def experiment1(f_name, data_path):
    """Run DTI fitting and EuDX tracking on one subject and write a report."""
    f = open(f_name + '_out.txt', 'w')

    # ------------------------------------------------------------------
    # Read data
    dipy_home = pjoin(os.path.expanduser('~'), '.dipy')

    folder = pjoin(dipy_home, data_path)
    fraw = pjoin(folder, f_name + '.nii.gz')
    fbval = pjoin(folder, f_name + '.bval')
    fbvec = pjoin(folder, f_name + '.bvec')
    flabels = pjoin(folder, f_name + '.nii-label.nii.gz')

    bvals, bvecs = read_bvals_bvecs(fbval, fbvec)
    gtab = gradient_table(bvals, bvecs)
    img = nib.load(fraw)
    data = img.get_data()
    print('data.shape (%d, %d, %d, %d)' % data.shape)
    print('Building DTI Model Data......')

    # Load label volume
    label_img = nib.load(flabels)
    labels = label_img.get_data()

    labelpo1 = label_position(labels, 1)
    print(labelpo1)

    # ------------------------------------------------------------------
    maskdata, mask = median_otsu(data, 3, 1, False, vol_idx=range(10, 50), dilate=2)

    from dipy.reconst.dti import TensorModel
    tenmodel = TensorModel(gtab)
    tenfit = tenmodel.fit(data, mask)

    FA = fractional_anisotropy(tenfit.evals)
    FA[np.isnan(FA)] = 0
    np.save(f_name + '_FA', FA)
    fa_img = nib.Nifti1Image(FA.astype(np.float32), img.get_affine())
    # nib.save(fa_img, f_name + '_DTI_tensor_fa.nii.gz')
    # print('Saving "DTI_tensor_fa.nii.gz" successful.')
    evecs_img = nib.Nifti1Image(tenfit.evecs.astype(np.float32), img.get_affine())
    # nib.save(evecs_img, f_name + '_DTI_tensor_evecs.nii.gz')
    # print('Saving "DTI_tensor_evecs.nii.gz" successful.')

    # ------------------------------------------------------------------
    # Fiber tracking
    print('Fiber Tracking......')
    from dipy.tracking.eudx import EuDX
    from dipy.data import get_sphere

    from dipy.tracking import utils
    seeds = utils.seeds_from_mask(labels, density=3)
    print('The number of seeds is %d.' % len(seeds))
    print('The number of seeds is %d.' % len(seeds), file=f)

    sphere = get_sphere('symmetric724')
    from dipy.reconst.dti import quantize_evecs
    evecs = evecs_img.get_data()
    peak_indices = quantize_evecs(evecs, sphere.vertices)

    streamline_generator = EuDX(FA.astype('f8'),
                                peak_indices,
                                seeds=10**5,
                                odf_vertices=sphere.vertices,
                                a_low=0.2,
                                step_sz=0.5,
                                ang_thr=60.0,
                                total_weight=.5,
                                max_points=10**5)
    streamlines_all = list(streamline_generator)

    # Keep only the streamlines longer than select_length
    # (avoid shadowing the imported length() with a local variable)
    from dipy.tracking.utils import length
    lengths = list(length(streamlines_all))
    select_length = 0
    streamlines = [s for s, l in zip(streamlines_all, lengths)
                   if l > select_length]

    hdr = nib.trackvis.empty_header()
    hdr['voxel_size'] = img.get_header().get_zooms()[:3]
    hdr['voxel_order'] = 'LAS'
    hdr['dim'] = FA.shape[:3]

    # Extract the ROI streamlines
    affine = streamline_generator.affine
    cc_streamlines = label_streamlines(streamlines, labels, 1, affine, hdr, f_name, data_path)
    # M, grouping = connective_label(cc_streamlines, labels, affine, hdr, f_name, data_path)
    label_streamlines_density(cc_streamlines, labels, affine, f_name, img, label_img)

    # Handle the second label volume
    flabels2 = pjoin(folder, f_name + '22.nii-label.nii.gz')
    label_img2 = nib.load(flabels2)
    labels2 = label_img2.get_data()

    cc22_streamlines = label_streamlines(streamlines, labels2, 3, affine, hdr, f_name, data_path)
    # Merge the two label volumes (labels[:] would be a view, so copy explicitly)
    labels3 = labels.copy()
    merge_mask = (labels == 0) & (labels2 != 0)
    labels3[merge_mask] = labels2[merge_mask]

    M, grouping = connective_label(streamlines, labels3, affine, hdr, f_name, data_path)
    print(M)
    print(grouping[0, 3])
    f.close()


# experiment1('zhu long ping', 'patient_data')
Example #15
    ]

    return bundle


bundle = simulated_bundles()

print('This bundle has %d streamlines' % len(bundle))
"""
This bundle has 50 streamlines.

Using the ``length`` function we can retrieve the lengths of each streamline.
Below we show the histogram of the lengths of the streamlines.
"""

lengths = list(length(bundle))

import matplotlib.pyplot as plt

fig_hist, ax = plt.subplots(1)
ax.hist(lengths, color='burlywood')
ax.set_xlabel('Length')
ax.set_ylabel('Count')
fig_hist.savefig('length_histogram.png')
plt.show()
"""
.. figure:: length_histogram.png
   :align: center

   **Histogram of lengths of the streamlines**