Example 1
def test_eudx():

    # read bvals,gradients and data
    fimg, fbvals, fbvecs = get_data("small_64D")
    bvals = np.load(fbvals)
    gradients = np.load(fbvecs)
    img = ni.load(fimg)
    data = img.get_data()

    print(data.shape)
    gqs = GeneralizedQSampling(data, bvals, gradients)
    ten = Tensor(data, bvals, gradients, thresh=50)
    seed_list = np.dot(np.diag(np.arange(10)), np.ones((10, 3)))
    iT = iter(EuDX(gqs.qa(), gqs.ind(), seeds=seed_list))
    T = []
    for t in iT:
        T.append(t)
    iT2 = iter(EuDX(ten.fa(), ten.ind(), seeds=seed_list))
    T2 = []
    for t in iT2:
        T2.append(t)

    print("length T ", sum([length(t) for t in T]))
    print("length T2", sum([length(t) for t in T2]))

    print(gqs.QA[1, 4, 8, 0])
    print(gqs.QA.ravel()[ndarray_offset(np.array([1, 4, 8, 0]), np.array(gqs.QA.strides), 4, 8)])

    assert_almost_equal(
        gqs.QA[1, 4, 8, 0], gqs.QA.ravel()[ndarray_offset(np.array([1, 4, 8, 0]), np.array(gqs.QA.strides), 4, 8)]
    )

    assert_almost_equal(sum([length(t) for t in T]), 70.999996185302734, places=3)
    assert_almost_equal(sum([length(t) for t in T2]), 56.999997615814209, places=3)
Example 2
def test_eudx():

    # read bvals, gradients and data
    fimg, fbvals, fbvecs = get_data('small_64D')
    bvals = np.load(fbvals)
    gradients = np.load(fbvecs)
    img = ni.load(fimg)
    data = img.get_data()

    print(data.shape)
    gqs = GeneralizedQSampling(data, bvals, gradients)
    ten = Tensor(data, bvals, gradients, thresh=50)
    seed_list = np.dot(np.diag(np.arange(10)), np.ones((10, 3)))
    iT = iter(EuDX(gqs.qa(), gqs.ind(), seed_list=seed_list))
    T = []
    for t in iT:
        T.append(t)
    iT2 = iter(EuDX(ten.fa(), ten.ind(), seed_list=seed_list))
    T2 = []
    for t in iT2:
        T2.append(t)

    print('length T ', sum([length(t) for t in T]))
    print('length T2', sum([length(t) for t in T2]))

    print(gqs.QA[1, 4, 8, 0])
    print(gqs.QA.ravel()[ndarray_offset(np.array([1, 4, 8, 0]),
                                        np.array(gqs.QA.strides), 4, 8)])

    assert_equal(gqs.QA[1, 4, 8, 0],
                 gqs.QA.ravel()[ndarray_offset(np.array([1, 4, 8, 0]),
                                               np.array(gqs.QA.strides), 4, 8)])

    # assert_equal, sum([length(t) for t in T]), 77.999996662139893
    # assert_equal, sum([length(t) for t in T2]), 63.499998092651367
    assert_equal(sum([length(t) for t in T]), 75.214988201856613)
    assert_equal(sum([length(t) for t in T2]), 60.202986091375351)
Example 3
def load_data(id, limits=[0, np.Inf]):
    ids = ["02", "03", "04", "05", "06", "08", "09", "10", "11", "12"]
    filename = "data/subj_" + ids[id] + "_lsc_QA_ref.dpy"
    dp = Dpy(filename, "r")
    print "Loading", filename
    tracks = dp.read_tracks()
    dp.close()
    tracks = [t for t in tracks if length(t) >= limits[0] and length(t) <= limits[1]]
    return tracks
Example 4
    def filtered_streamlines_by_length(self,
                                       minimum=Config.get_config()
                                       .getfloat("tracking", "minimumStreamlineLength",
                                                 fallback=20.0),
                                       maximum=Config.get_config()
                                       .getfloat("tracking", "maximumStreamlineLength",
                                                 fallback=200.0)):
        """
        Removes streamlines that are shorter than minimum (in mm)
        or longer than maximum (in mm).
        """
        return [x for x in self.streamlines if metrics.length(x) > minimum
                and metrics.length(x) < maximum]
Example 5

def load_cst(tracks_filename, cst_index_file, ext):
    from dipy.io.dpy import Dpy
    from dipy.io.pickles import load_pickle
    dpr_tracks = Dpy(tracks_filename, 'r')
    all_tracks=dpr_tracks.read_tracks()
    dpr_tracks.close()
    tracks_id = load_pickle(cst_index_file)
    	
    cst = [all_tracks[i] for i  in tracks_id]    
    
    cst_ext = [all_tracks[i] for i  in tracks_id]
    medoid_cst = []
    #len_dis = 250
    if ext:
        k = np.round(len(cst)*1.2)
        not_cst_fil = []
        min_len = min(len(i) for i in cst)
        #print 'min_len of cst', min_len
        min_len = min_len*2.2/3#2./3.2# - 20
        for i in np.arange(len(all_tracks)):
            if (i not in tracks_id) and (length(all_tracks[i]) > min_len):
                not_cst_fil.append(all_tracks[i])
        
        #for st in all_tracks:
        #    if (length(st)>=min_len) and (st not in cst):
        #        not_cst_fil.append(st)
                
        from dipy.segment.quickbundles import QuickBundles
        
        qb = QuickBundles(cst,200,18)
        
        medoid_cst = qb.centroids[0]
        
        med_notcst_dm = bundles_distances_mam([medoid_cst], not_cst_fil)
        med_cst_dm = bundles_distances_mam([medoid_cst], cst)
        
        cst_rad = med_cst_dm[0][np.argmax(med_cst_dm[0])]
        len_dis = cst_rad * 2.8/2.
        #print med_cst_dm
        #print cst_rad
        #print len_dis
        #k_indices which close to the medoid
        sort = np.argsort(med_notcst_dm,axis = 1)[0]
        #print sort[:k+1]
        while (k>0 and med_notcst_dm[0][sort[k]]>=len_dis):
            k = k - 1
            
        #print med_notcst_dm[0][sort[0:k]]    
        #print k
        #close_indices = np.argsort(cst_dm,axis = 1)[:,0:k][0]
        close_indices = sort[0:k]
        
        for idx in close_indices:
            cst_ext.append(not_cst_fil[idx])            
        
        return cst, cst_ext, medoid_cst

    return cst
Example 6
def DIPY_nii2streamlines(imgfile, maskfile, bvals, bvecs, output_prefix):
    import numpy as np
    import nibabel as nib
    import os

    from dipy.reconst.dti import TensorModel

    print "nii2streamlines"

    img = nib.load(imgfile)
    bvals = np.genfromtxt(bvals)
    bvecs = np.genfromtxt(bvecs)
    if bvecs.shape[1] != 3:
        bvecs = bvecs.T
    print bvecs.shape

    from nipype.utils.filemanip import split_filename
    _, prefix, _  = split_filename(imgfile)
    from dipy.data import gradient_table
    gtab = gradient_table(bvals, bvecs)
    data = img.get_data()
    affine = img.get_affine()
    zooms = img.get_header().get_zooms()[:3]
    new_zooms = (2., 2., 2.)
    data2, affine2 = data, affine
    mask = nib.load(maskfile).get_data().astype(np.bool)
    tenmodel = TensorModel(gtab)
    tenfit = tenmodel.fit(data2, mask)

    from dipy.reconst.dti import fractional_anisotropy
    FA = fractional_anisotropy(tenfit.evals)
    FA[np.isnan(FA)] = 0
    fa_img = nib.Nifti1Image(FA, img.get_affine())
    nib.save(fa_img, experiment_dir + '/' + ('%s_tensor_fa.nii.gz' % prefix))
    evecs = tenfit.evecs
    evec_img = nib.Nifti1Image(evecs, img.get_affine())
    nib.save(evec_img, experiment_dir + '/' + ('%s_tensor_evec.nii.gz' % prefix))

    from dipy.data import get_sphere
    sphere = get_sphere('symmetric724')
    from dipy.reconst.dti import quantize_evecs

    peak_indices = quantize_evecs(tenfit.evecs, sphere.vertices)

    from dipy.tracking.eudx import EuDX
    eu = EuDX(FA, peak_indices, odf_vertices = sphere.vertices, a_low=0.2, seeds=10**6, ang_thr=35)
    tensor_streamlines = [streamline for streamline in eu]
    hdr = nib.trackvis.empty_header()
    hdr['voxel_size'] = new_zooms
    hdr['voxel_order'] = 'LPS'
    hdr['dim'] = data2.shape[:3]

    import dipy.tracking.metrics as dmetrics
    tensor_streamlines = ((sl, None, None) for sl in tensor_streamlines if dmetrics.length(sl) > 15)
    ten_sl_fname = experiment_dir + '/' + ('%s_streamline.trk' % prefix)
    nib.trackvis.write(ten_sl_fname, tensor_streamlines, hdr, points_space='voxel')
    return ten_sl_fname
Example 7
def compute_tracking(src_dti_dir, out_trk_dir, subj_name):

    # Loading FA and evecs data
    src_fa_file = os.path.join(src_dti_dir, subj_name + par_fa_suffix)
    fa_img = nib.load(src_fa_file)
    FA = fa_img.get_data()
    affine = fa_img.get_affine()

    src_evecs_file = os.path.join(src_dti_dir, subj_name + par_evecs_suffix)
    evecs_img = nib.load(src_evecs_file)
    evecs = evecs_img.get_data()

    # Computation of streamlines
    sphere = get_sphere('symmetric724') 
    peak_indices = dti.quantize_evecs(evecs, sphere.vertices)
    streamlines = EuDX(FA.astype('f8'),
                       ind=peak_indices, 
                       seeds=par_eudx_seeds,
                       odf_vertices= sphere.vertices,
                       a_low=par_eudx_threshold)

    # Saving tractography
    voxel_size = fa_img.get_header().get_zooms()[:3]
    dims = FA.shape[:3]
    seed = par_eudx_seeds
    seed = "_%d%s" % (seed/10**6 if seed>10**5 else seed/10**3, 
                      'K' if seed < 1000000 else 'M')

    hdr = nib.trackvis.empty_header()
    hdr['voxel_size'] = voxel_size
    hdr['voxel_order'] = 'LAS'
    hdr['dim'] = dims
    hdr['vox_to_ras'] = affine
    strm = ((sl, None, None) for sl in streamlines 
            if length(sl) > par_trk_min and length(sl) < par_trk_max)
    out_trk_file = os.path.join(out_trk_dir, subj_name + seed + par_trk_suffix)
    nib.trackvis.write(out_trk_file, strm, hdr, points_space='voxel')    

    tracks = [track for track in streamlines]
    out_dipy_file = os.path.join(out_trk_dir,subj_name + seed + par_dipy_suffix)
    dpw = Dpy(out_dipy_file, 'w')
    dpw.write_tracks(tracks)
    dpw.close()
Example 8
    def loading_full_tractograpy(self, tracpath=None):
        """
        Load the full tractography and create a StreamlineLabeler to
        show it all.
        """
        # load the tracks registered in MNI space
        self.tracpath=tracpath
        basename = os.path.basename(self.tracpath)
        tracks_basename, tracks_format = os.path.splitext(basename)
        
        if tracks_format == '.dpy': 
            
            dpr = Dpy(self.tracpath, 'r')
            print "Loading", self.tracpath
            self.T = dpr.read_tracks()
            dpr.close()
            self.T = np.array(self.T, dtype=np.object)

            
        elif tracks_format == '.trk': 
            streams, self.hdr = nib.trackvis.read(self.tracpath, points_space='voxel')
            print "Loading", self.tracpath
            self.T = np.array([s[0] for s in streams], dtype=np.object)
         

        print "Removing short streamlines"
        self.T = np.array([t for t in self.T if length(t)>= 15],  dtype=np.object)
        
        tracks_directoryname = os.path.dirname(self.tracpath) + '/.temp/'
        general_info_filename = tracks_directoryname + tracks_basename + '.spa'
        
        
        
        
        # Check if there is the .spa file that contains all the
        # computed information from the tractography anyway and try to
        # load it
        try:
            print "Looking for general information file"
            self.load_info(general_info_filename)
                    
        except (IOError, KeyError):
            print "General information not found, recomputing buffers"
            self.update_info(general_info_filename)
                    
        # create the interaction system for tracks, 
        self.streamlab  = StreamlineLabeler('Bundle Picker',
                                            self.buffers, self.clusters,
                                            vol_shape=self.dims, 
                                            affine=np.copy(self.affine),
                                            clustering_parameter=len(self.clusters),
                                            clustering_parameter_max=len(self.clusters),
                                            full_dissimilarity_matrix=self.full_dissimilarity_matrix)
                
        self.scene.add_actor(self.streamlab)
Example 9
def compute_tracking(src_dti_dir, out_trk_dir, subj_name):

    # Loading FA and evecs data
    src_fa_file = os.path.join(src_dti_dir, subj_name + par_fa_suffix)
    fa_img = nib.load(src_fa_file)
    FA = fa_img.get_data()

    src_evecs_file = os.path.join(src_dti_dir, subj_name + par_evecs_suffix)
    evecs_img = nib.load(src_evecs_file)
    evecs = evecs_img.get_data()

    # Computation of streamlines
    sphere = get_sphere('symmetric724')
    peak_indices = dti.quantize_evecs(evecs, sphere.vertices)
    streamlines = EuDX(FA.astype('f8'),
                       ind=peak_indices,
                       seeds=par_eudx_seeds,
                       odf_vertices=sphere.vertices,
                       a_low=par_eudx_threshold)

    # Saving tractography
    voxel_size = fa_img.get_header().get_zooms()[:3]
    dims = FA.shape[:3]
    seed = par_eudx_seeds
    seed = "_%d%s" % (seed / 10**6 if seed > 10**5 else seed / 10**3,
                      'K' if seed < 1000000 else 'M')

    hdr = nib.trackvis.empty_header()
    hdr['voxel_size'] = voxel_size
    hdr['voxel_order'] = 'LAS'
    hdr['dim'] = dims
    strm = ((sl, None, None) for sl in streamlines
            if length(sl) > par_trk_min and length(sl) < par_trk_max)
    out_trk_file = os.path.join(out_trk_dir, subj_name + seed + par_trk_suffix)
    nib.trackvis.write(out_trk_file, strm, hdr, points_space='voxel')

    tracks = [track for track in streamlines]
    out_dipy_file = os.path.join(out_trk_dir,
                                 subj_name + seed + par_dipy_suffix)
    dpw = Dpy(out_dipy_file, 'w')
    dpw.write_tracks(tracks)
    dpw.close()
Example 10
    def loading_full_tractograpy(self, tracpath=None):
        """
        Load the full tractography and create a StreamlineLabeler to
        show it all.
        """
        # load the tracks registered in MNI space
        self.tracpath = tracpath
        basename = os.path.basename(self.tracpath)
        tracks_basename, tracks_format = os.path.splitext(basename)

        if tracks_format == '.dpy':

            dpr = Dpy(self.tracpath, 'r')
            print "Loading", self.tracpath
            self.T = dpr.read_tracks()
            dpr.close()
            self.T = np.array(self.T, dtype=np.object)

        elif tracks_format == '.trk':
            streams, self.hdr = nib.trackvis.read(self.tracpath,
                                                  points_space='voxel')
            print "Loading", self.tracpath
            self.T = np.array([s[0] for s in streams], dtype=np.object)

        print "Removing short streamlines"
        self.T = np.array([t for t in self.T if length(t) >= 15],
                          dtype=np.object)

        tracks_directoryname = os.path.dirname(self.tracpath) + '/.temp/'
        general_info_filename = tracks_directoryname + tracks_basename + '.spa'

        # Check if there is the .spa file that contains all the
        # computed information from the tractography anyway and try to
        # load it
        try:
            print "Looking for general information file"
            self.load_info(general_info_filename)

        except (IOError, KeyError):
            print "General information not found, recomputing buffers"
            self.update_info(general_info_filename)

        # create the interaction system for tracks,
        self.streamlab = StreamlineLabeler(
            'Bundle Picker',
            self.buffers,
            self.clusters,
            vol_shape=self.dims,
            affine=np.copy(self.affine),
            clustering_parameter=len(self.clusters),
            clustering_parameter_max=len(self.clusters),
            full_dissimilarity_matrix=self.full_dissimilarity_matrix)

        self.scene.add_actor(self.streamlab)
Example 11
def test_eudx():

    #read bvals,gradients and data
    fimg, fbvals, fbvecs = get_data('small_64D')
    bvals = np.load(fbvals)
    gradients = np.load(fbvecs)
    img = ni.load(fimg)
    data = img.get_data()

    print(data.shape)
    gqs = GeneralizedQSampling(data, bvals, gradients)
    ten = Tensor(data, bvals, gradients, thresh=50)
    seed_list = np.dot(np.diag(np.arange(10)), np.ones((10, 3)))
    iT = iter(EuDX(gqs.qa(), gqs.ind(), seeds=seed_list))
    T = []
    for t in iT:
        T.append(t)
    iT2 = iter(EuDX(ten.fa(), ten.ind(), seeds=seed_list))
    T2 = []
    for t in iT2:
        T2.append(t)

    print('length T ', sum([length(t) for t in T]))
    print('length T2', sum([length(t) for t in T2]))

    print(gqs.QA[1, 4, 8, 0])
    print(gqs.QA.ravel()[ndarray_offset(np.array([1, 4, 8, 0]),
                                        np.array(gqs.QA.strides), 4, 8)])

    assert_almost_equal(
        gqs.QA[1, 4, 8, 0],
        gqs.QA.ravel()[ndarray_offset(np.array([1, 4, 8, 0]),
                                      np.array(gqs.QA.strides), 4, 8)])

    assert_almost_equal(sum([length(t) for t in T]),
                        70.999996185302734,
                        places=3)
    assert_almost_equal(sum([length(t) for t in T2]),
                        56.999997615814209,
                        places=3)
Example 12
def filter_streamlines_by_length(streamlines,
                                 data_per_point,
                                 data_per_streamline,
                                 min_length=0.,
                                 max_length=np.inf):
    """
    Filter streamlines using minimum and max length

    Parameters
    ----------
    streamlines: list
        List of list of 3D points.

    data_per_point: dict
        dict of data with one value per point per streamline
    data_per_streamline: dict
        dict of data with one value per streamline

    min_length: float
        Minimum length of streamlines.
    max_length: float
        Maximum length of streamlines.

    Return
    ------
    filtered_streamlines: list
        List of filtered streamlines by length.

    filtered_data_per_point: dict
        dict of data per point for filtered streamlines
    filtered_data_per_streamline: dict
        dict of data per streamline for filtered streamlines
    """

    lengths = []
    for streamline in streamlines:
        lengths.append(length(streamline))

    lengths = np.asarray(lengths)

    filter_stream = np.logical_and(lengths >= min_length,
                                   lengths <= max_length)

    filtered_streamlines = list(np.asarray(streamlines)[filter_stream])
    filtered_data_per_point = data_per_point[filter_stream]
    filtered_data_per_streamline = data_per_streamline[filter_stream]

    return filtered_streamlines, filtered_data_per_point, filtered_data_per_streamline
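A minimal sketch of the filtering core above with toy data (an assumption: dipy is installed; plain lists stand in for the data_per_point/data_per_streamline containers, which are omitted here):

import numpy as np
from dipy.tracking.streamline import length

# Straight-line toy streamlines of 5, 30 and 300 mm
streamlines = [np.array([[0., 0., 0.], [l, 0., 0.]]) for l in (5., 30., 300.)]
lengths = np.asarray(length(streamlines))       # array([  5.,  30., 300.])
filter_stream = np.logical_and(lengths >= 20., lengths <= 200.)
kept = [s for s, keep in zip(streamlines, filter_stream) if keep]
assert len(kept) == 1                           # only the 30 mm line survives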
Example 13
def iterate_on_sampler_and_verify(batch_sampler, batch_size, batch_size_units):
    # Default variables
    nb_subjs = len(TEST_EXPECTED_SUBJ_NAMES)

    logging.debug('    Iterating on the batch sampler directly, without '
                  'actually loading the batch.')
    batch_generator = batch_sampler.__iter__()

    # Init to avoid "batch referenced before assignment" in case generator
    # fails

    # Loop on batches
    for batch_idx in batch_generator:
        subj0 = batch_idx[0]
        (subj0_id, subj0_streamlines_idx) = subj0

        nb_streamlines_sampled = len(subj0_streamlines_idx)
        if batch_size_units == 'nb_streamlines':
            logging.debug(
                '     Based on first subject, nb sampled streamlines per subj '
                'was {} \n'
                '     (Batch size should be {} streamlines, result should be '
                'batch_size / nb_subjs ({}) = {})\n'.format(
                    nb_streamlines_sampled, batch_size, nb_subjs,
                    batch_size / nb_subjs))

            assert nb_streamlines_sampled == batch_size / nb_subjs
        else:
            subj0 = batch_sampler.dataset.subjs_data_list[0]
            sub0_sft = subj0.sft_data_list[0].as_sft(subj0_streamlines_idx)
            sub0_sft.to_rasmm()
            lengths = [length(s) for s in sub0_sft.streamlines]
            computed_size = sum(lengths)

            logging.debug(
                '    Based on first subject, nb sampled streamlines per subj '
                'was {} for a total size of {}\n'
                '    (Batch size should be {} in terms of length in mm, '
                'result should be batch_size / nb_subjs ({}) = {})\n'.format(
                    nb_streamlines_sampled, computed_size, batch_size,
                    nb_subjs, batch_size / nb_subjs))

            allowed_error = 200  # Usually, biggest streamline length is 200mm
            assert batch_size - computed_size < allowed_error
        break
Example 14
from typing import List

from dipy.tracking import metrics


def filtered_streamlines_by_length(streamlines: List,
                                   minimum=20,
                                   maximum=200) -> List:
    """
    Returns filtered streamlines that are longer than minimum (in mm) and shorter than maximum (in mm)
    Parameters
    ----------
    streamlines
        The streamlines we would like to filter
    minimum
        The minimum length in mm
    maximum
        The maximum length in mm
    Returns
    -------
    List
        The filtered streamlines
    """
    return [x for x in streamlines if minimum <= metrics.length(x) <= maximum]
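A minimal usage sketch (assuming the function and imports above are in scope); the toy streamlines are straight lines, so metrics.length is just the end-to-end distance:

import numpy as np

short = np.array([[0., 0., 0.], [5., 0., 0.]])    # 5 mm
medium = np.array([[0., 0., 0.], [50., 0., 0.]])  # 50 mm
kept = filtered_streamlines_by_length([short, medium])
assert len(kept) == 1 and np.allclose(kept[0], medium)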
Example 15
def test_length():
    # Generate a simulated bundle of fibers:
    n_streamlines = 50
    n_pts = 100
    t = np.linspace(-10, 10, n_pts)

    bundle = []
    for i in np.linspace(3, 5, n_streamlines):
        pts = np.vstack((np.cos(2 * t/np.pi), np.zeros(t.shape) + i, t)).T
        bundle.append(pts)

    start = np.random.randint(10, 30, n_streamlines)
    end = np.random.randint(60, 100, n_streamlines)

    bundle = [10 * streamline[start[i]:end[i]] for (i, streamline) in
              enumerate(bundle)]

    bundle_lengths = length(bundle)
    for idx, this_length in enumerate(bundle_lengths):
        assert_equal(this_length, metrix.length(bundle[idx]))
Example 16
def test_length():
    # Generate a simulated bundle of fibers:
    n_streamlines = 50
    n_pts = 100
    t = np.linspace(-10, 10, n_pts)

    bundle = []
    for i in np.linspace(3, 5, n_streamlines):
        pts = np.vstack((np.cos(2 * t / np.pi), np.zeros(t.shape) + i, t)).T
        bundle.append(pts)

    start = np.random.randint(10, 30, n_streamlines)
    end = np.random.randint(60, 100, n_streamlines)

    bundle = [10 * streamline[start[i]:end[i]] for (i, streamline) in
              enumerate(bundle)]

    bundle_lengths = length(bundle)
    for idx, this_length in enumerate(bundle_lengths):
        npt.assert_equal(this_length, metrics.length(bundle[idx]))
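Both tests above apply length to a whole bundle at once. A small sketch of the two variants (assuming dipy is installed): dipy.tracking.streamline.length vectorizes over a sequence of tracks, while dipy.tracking.metrics.length measures a single track:

import numpy as np
from dipy.tracking import metrics
from dipy.tracking.streamline import length

track = np.array([[0., 0., 0.], [3., 4., 0.]])  # one 5 mm segment
print(metrics.length(track))                    # 5.0
print(length([track, 2 * track]))               # array([ 5., 10.])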
Example 17
    def __iter__(self):
        """ This is were all the fun starts """

        x, y, z, g = self.a.shape

        # for all seeds
        for i in range(self.seed_no):
            if self.seed_list is None:
                rx = (x - 1) * np.random.rand()
                ry = (y - 1) * np.random.rand()
                rz = (z - 1) * np.random.rand()
                seed = np.ascontiguousarray(np.array([rx, ry, rz]), dtype=np.float64)
            else:
                seed = np.ascontiguousarray(self.seed_list[i], dtype=np.float64)
            # for all peaks
            for ref in range(g):
                # propagate up and down
                # print g,self.a.shape
                # """
                # print i,seed
                track = eudx_both_directions(
                    seed.copy(),
                    ref,
                    self.a,
                    self.ind,
                    self.odf_vertices,
                    self.a_low,
                    self.ang_thr,
                    self.step_sz,
                    self.total_weight,
                )
                # """
                # track =None
                if track is None:
                    # print 'None'
                    pass
                else:
                    # return a track from that seed
                    if length(track) > self.length_thr:
                        # print 'track'
                        yield track
Example 18
import logging

import numpy as np
from scipy.ndimage import gaussian_filter1d
from dipy.tracking.streamline import length, set_number_of_points


def smooth_line_gaussian(streamline, sigma):
    if sigma < 0.00001:
        raise ValueError('Cannot have a 0 sigma with gaussian.')

    nb_points = int(length(streamline))
    if nb_points < 2:
        logging.debug('Streamline shorter than 1mm, corner cases possible.')
        nb_points = 2
    sampled_streamline = set_number_of_points(streamline, nb_points)

    x, y, z = sampled_streamline.T
    x3 = gaussian_filter1d(x, sigma)
    y3 = gaussian_filter1d(y, sigma)
    z3 = gaussian_filter1d(z, sigma)
    smoothed_streamline = np.asarray([x3, y3, z3]).T

    # Ensure first and last point remain the same
    smoothed_streamline[0] = streamline[0]
    smoothed_streamline[-1] = streamline[-1]

    return smoothed_streamline
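A minimal usage sketch (assuming the imports and function above are in scope): smooth a jittery 3D line; the endpoints are pinned back to the input by construction:

rng = np.random.default_rng(0)
t = np.linspace(0., 1., 50)
noisy = np.c_[100. * t, np.sin(10. * t), 0.1 * rng.standard_normal(50)]
smooth = smooth_line_gaussian(noisy, sigma=5.)
assert np.allclose(smooth[0], noisy[0]) and np.allclose(smooth[-1], noisy[-1])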
Example 19
import logging

import numpy as np
from scipy.interpolate import splev, splprep
from dipy.tracking.streamline import length, set_number_of_points


def smooth_line_spline(streamline, sigma, nb_ctrl_points):
    if sigma < 0.00001:
        raise ValueError('Cannot have a 0 sigma with spline.')

    nb_points = int(length(streamline))
    if nb_points < 2:
        logging.debug('Streamline shorter than 1mm, corner cases possible.')

    if nb_ctrl_points < 3:
        nb_ctrl_points = 3

    sampled_streamline = set_number_of_points(streamline, nb_ctrl_points)

    tck, u = splprep(sampled_streamline.T, s=sigma)
    smoothed_streamline = splev(np.linspace(0, 1, 99), tck)
    smoothed_streamline = np.squeeze(np.asarray([smoothed_streamline]).T)

    # Ensure first and last point remain the same
    smoothed_streamline[0] = streamline[0]
    smoothed_streamline[-1] = streamline[-1]

    return smoothed_streamline
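The same sketch for the spline variant (again assuming the imports and function above); nb_ctrl_points must exceed the cubic-spline order used by splprep, so ten control points are used here:

rng = np.random.default_rng(0)
t = np.linspace(0., 1., 50)
noisy = np.c_[100. * t, np.sin(10. * t), 0.1 * rng.standard_normal(50)]
smooth = smooth_line_spline(noisy, sigma=2., nb_ctrl_points=10)
assert np.allclose(smooth[0], noisy[0]) and np.allclose(smooth[-1], noisy[-1])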
Example 20
def show(T,A,IND,VERTS,scale):
    
    r=fvtk.ren()
    fvtk.clear(r)
    fvtk.add(r,fvtk.line(T,fvtk.red))
    fvtk.show(r)
    
    Td=[downsample(t,20) for t in T]
    C=local_skeleton_clustering(Td,3)
    fvtk.clear(r)
    lent=float(len(T))
    
    for c in C:
        color=np.random.rand(3)
        virtual=C[c]['hidden']/float(C[c]['N'])
        if length(virtual)> virtual_thr: 
            linewidth=100*len(C[c]['indices'])/lent
            if linewidth<1.:
                linewidth=1
            #fvtk.add(r,fvtk.line(virtual,color,linewidth=linewidth))
            #fvtk.add(r,fvtk.label(r,str(len(C[c]['indices'])),pos=virtual[0],scale=3,color=color ))
        #print C[c]['hidden'].shape
    
    print A.shape
    print IND.shape
    print VERTS.shape
    
    all,allo=fvtk.crossing(A,IND,VERTS,scale,True)
    colors=np.zeros((len(all),3))
    for (i,a) in enumerate(all):
        if allo[i][0]==0 and allo[i][1]==0 and allo[i][2]==1:
            pass
        else:            
            colors[i]=cm.boys2rgb(allo[i])
    
    fvtk.add(r,fvtk.line(all,colors))    
    fvtk.show(r)
Example 21
def save_id_tract_plus_sff(tracks_filename, id_file, num_proto, distance, out_fname):
   
    if (tracks_filename[-3:]=='dpy'):
        dpr_tracks = Dpy(tracks_filename, 'r')
        all_tracks=dpr_tracks.read_tracks()
        dpr_tracks.close()
    else:
        all_tracks = load_whole_tract_trk(tracks_filename)
    
    tracks_id = load_pickle(id_file)
    	
    tract = [all_tracks[i] for i  in tracks_id]    
    
    not_tract_fil = []
    id_not_tract_fil = []
    min_len = min(len(i) for i in tract)
    #print 'min_len of cst', min_len
    min_len = min_len*2.2/3#2./3.2# - 20
    for i in np.arange(len(all_tracks)):
        if (i not in tracks_id) and (length(all_tracks[i]) > min_len):
            not_tract_fil.append(all_tracks[i])
            id_not_tract_fil.append(i)
    
    not_tract_fil = np.array(not_tract_fil,dtype=np.object)        
    sff_pro_id = sff(not_tract_fil, num_proto, distance)        
    
    tract_sff_id = []
    for i in tracks_id:
        tract_sff_id.append(i)
        
    for idx in sff_pro_id:        
        tract_sff_id.append(id_not_tract_fil[idx])
        
    #tract_sff_id.append(id_not_tract_fil[i] for i in sff_pro_id)
    print len(tract), len(tract_sff_id)
    save_pickle(out_fname, tract_sff_id)
    return tract_sff_id
Example 22
    def __iter__(self):
        ''' This is where all the fun starts '''
        x, y, z, g = self.a.shape
        # for all seeds
        for i in range(self.seed_no):

            if self.seed_list is None:
                rx = (x - 1) * np.random.rand()
                ry = (y - 1) * np.random.rand()
                rz = (z - 1) * np.random.rand()
                seed = np.ascontiguousarray(np.array([rx, ry, rz]), dtype=np.float64)
            else:
                seed = np.ascontiguousarray(self.seed_list[i], dtype=np.float64)

            # for all peaks
            for ref in range(self.a.shape[-1]):
                # propagate up and down
                track = eudx_propagation(seed.copy(), ref, self.a, self.ind,
                                         self.odf_vertices, self.a_low,
                                         self.ang_thr, self.step_sz)
                if track is None:
                    pass
                else:
                    # tlist.append(track.astype(np.float32))
                    if length(track) > self.length_thr:
                        yield track
Example 23
    def __iter__(self):
        ''' This is where all the fun starts '''

        x, y, z, g = self.a.shape

        #for all seeds
        for i in range(self.seed_no):
            if self.seed_list is None:
                rx = (x - 1) * np.random.rand()
                ry = (y - 1) * np.random.rand()
                rz = (z - 1) * np.random.rand()
                seed = np.ascontiguousarray(np.array([rx, ry, rz]),
                                            dtype=np.float64)
            else:
                seed = np.ascontiguousarray(self.seed_list[i],
                                            dtype=np.float64)
            #for all peaks
            for ref in range(g):
                #propagate up and down
                #print g,self.a.shape
                #"""
                #print i,seed
                track = eudx_both_directions(seed.copy(), ref, self.a,
                                             self.ind, self.odf_vertices,
                                             self.a_low, self.ang_thr,
                                             self.step_sz, self.total_weight)
                #"""
                #track =None
                if track is None:
                    #print 'None'
                    pass
                else:
                    #return a track from that seed
                    if length(track) > self.length_thr:
                        #print 'track'
                        yield track
Example 24
def load_tractography(subject_id, subject_pair):

    tract_streamline = {}
    Total_streamline = 0

    for tract in subject_tracts[str(subject_pair)]:

        filename_tractography = '/home/nusrat//targetDirectory/MICCAI2015_DTI_EUDX/' + str(
            subject_id) + '_1M_wmql/wmql_' + str(tract) + '.trk'

        tractography, header = trackvis.read(filename_tractography,
                                             points_space='voxel')
        tractography = [streamline[0] for streamline in tractography]

        tractography = [
            streamline for streamline in tractography
            if length(streamline) >= 15
        ]
        dict_element = (tractography, len(tractography))
        tract_streamline[str(subject_id) + '_' + str(tract)] = dict_element

        Total_streamline = len(tractography) + Total_streamline

    return tract_streamline, Total_streamline
Example 25

seeds = utils.seeds_from_mask(white_matter, density=2)
streamline_generator = EuDX(csapeaks.peak_values, csapeaks.peak_indices,
                            odf_vertices=peaks.default_sphere.vertices,
                            a_low=.05,
                            step_sz=.5,
                            seeds=seeds)
affine = streamline_generator.affine
streamlines = list(streamline_generator)

# Keep only longer streamlines so that fewer tracts are visualized
# (for hardware reasons): include only streamlines longer than a
# particular length, here 50 mm.

from dipy.tracking.metrics import length

longer_streamlines = []
for tract in streamlines:
    if length(tract) > 50.0:
        longer_streamlines.append(tract)

# Streamlines visualization

from dipy.viz import fvtk
from dipy.viz.colormap import line_colors

# Make display objects

streamlines_actor = fvtk.line(longer_streamlines,
                              line_colors(longer_streamlines))

# Add display objects to canvas
r = fvtk.ren()
fvtk.add(r, streamlines_actor)
Example 26
    T.append(s[0])


r=fvtk.ren()
linea=fvtk.line(T,fvtk.red)
fvtk.add(r,linea)
fvtk.show(r)


#for more complicated visualizations use mayavi
#or the new fos when released

dT=[tm.downsample(t,10) for t in T]
C=td.local_skeleton_clustering(dT,d_thr=5)

ldT=[tm.length(t) for t in dT]
#average length
avg_ldT=sum(ldT)/len(dT)
print(avg_ldT)

"""
r=fvtk.ren()
#fvtk.clear(r)
colors=np.zeros((len(T),3))
for c in C:
    color=np.random.rand(1,3)
    for i in C[c]['indices']:
        colors[i]=color
fvtk.add(r,fvtk.line(T,colors,opacity=1))
fvtk.show(r)
"""
Example 27
    filename_1 = 'data/101_tracks_dti_10K.dpy'    
    filename_2 = 'data/104_tracks_dti_10K.dpy'    
    
    prototype_policies = ['random', 'fft', 'sff']
    num_prototypes = 10
    size1 = num_prototypes
    size2 = size1 + 20

    print "Loading tracks."
    dpr_1 = Dpy(filename_1, 'r')
    tracks_1_all = dpr_1.read_tracks()
    dpr_1.close()
    
    tracks_1 = []
    for st in tracks_1_all:
        if (length(st)>50):
            tracks_1.append(st)
    
    tracks_1 = np.array(tracks_1, dtype=np.object)
    
    dpr_2 = Dpy(filename_2, 'r')
    tracks_2_all = dpr_2.read_tracks()
    dpr_2.close()
    
    tracks_2 = []
    for st in tracks_2_all:
        if (length(st)>50):
            tracks_2.append(st)
    
    tracks_2 = np.array(tracks_2, dtype=np.object)
    
Example 28
def truth_length_min(tracks):
    lmin = length(tracks[0], False)        
    for k in range(len(tracks)):
        if lmin>length(tracks[k]):
            lmin = length(tracks[k])        
    return lmin
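The loop above can be written more compactly; a sketch assuming the same length import:

def truth_length_min_compact(tracks):
    return min(length(t) for t in tracks)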
Example 29
def filter_ellipsoid(sft, ellipsoid_radius, ellipsoid_center,
                     filter_type, is_exclude, is_in_vox=False):
    """
    Parameters
    ----------
    sft : StatefulTractogram
        StatefulTractogram containing the streamlines to segment.
    ellipsoid_radius : numpy.ndarray (3)
        Size in mm, x/y/z of the ellipsoid.
    ellipsoid_center: numpy.ndarray (3)
        Center x/y/z of the ellipsoid.
    filter_type: str
        One of the 3 following choices, 'any', 'either_end', 'both_ends'.
    is_exclude: bool
        Value to indicate if the ROI is an AND (false) or a NOT (true).
    is_in_vox: bool
        Value to indicate if the ROI is in voxel space.
    Returns
    -------
    new_sft : StatefulTractogram
        Filtered sft.
    line_based_indices : numpy.ndarray
        Ids of the streamlines passing through the mask.
    """
    pre_filtered_sft, pre_filtered_indices = \
        pre_filtering_for_geometrical_shape(sft, ellipsoid_radius,
                                            ellipsoid_center, filter_type,
                                            is_in_vox)
    pre_filtered_sft.to_rasmm()
    pre_filtered_sft.to_center()
    pre_filtered_streamlines = pre_filtered_sft.streamlines
    transfo, _, res, _ = sft.space_attributes

    if is_in_vox:
        ellipsoid_center = np.asarray(apply_affine(transfo,
                                                   ellipsoid_center))
    selected_by_ellipsoid = []
    line_based_indices_1 = []
    line_based_indices_2 = []
    # This is still point based (but resampled), I had a ton of problems trying
    # to use something with intersection, but even if I could do it :
    # The result won't be identical to MI-Brain since I am not using the
    # vtkPolydata. Also it won't be identical to TrackVis either,
    # because TrackVis is point-based for Spherical ROI...
    ellipsoid_radius = np.asarray(ellipsoid_radius)
    ellipsoid_center = np.asarray(ellipsoid_center)

    for i, line in enumerate(pre_filtered_streamlines):
        if filter_type == 'any':
            # Resample to 1/10 of the voxel size
            nb_points = max(int(length(line) / np.average(res) * 10), 2)
            line = set_number_of_points(line, nb_points)
            points_in_ellipsoid = np.sum(
                ((line - ellipsoid_center) / ellipsoid_radius) ** 2,
                axis=1)
            if np.argwhere(points_in_ellipsoid <= 1).any():
                # If at least one point was in the ellipsoid, we selected
                # the streamline
                selected_by_ellipsoid.append(pre_filtered_indices[i])
        else:
            points_in_ellipsoid = np.sum(
                ((line[0] - ellipsoid_center) / ellipsoid_radius) ** 2)

            if points_in_ellipsoid <= 1.0:
                line_based_indices_1.append(pre_filtered_indices[i])

            points_in_ellipsoid = np.sum(
                ((line[-1] - ellipsoid_center) / ellipsoid_radius) ** 2)
            if points_in_ellipsoid <= 1.0:
                line_based_indices_2.append(pre_filtered_indices[i])

    # Both endpoints need to be in the mask (AND)
    if filter_type == 'both_ends':
        selected_by_ellipsoid = np.intersect1d(line_based_indices_1,
                                               line_based_indices_2)
    # Only one endpoint needs to be in the mask (OR)
    elif filter_type == 'either_end':
        selected_by_ellipsoid = np.union1d(line_based_indices_1,
                                           line_based_indices_2)

    # If the 'exclude' option is used, the selection is inverted
    if is_exclude:
        selected_by_ellipsoid = np.setdiff1d(range(len(sft)),
                                             np.unique(selected_by_ellipsoid))
    line_based_indices = np.asarray(selected_by_ellipsoid).astype(np.int32)

    # From indices to sft
    streamlines = sft.streamlines[line_based_indices]
    data_per_streamline = sft.data_per_streamline[line_based_indices]
    data_per_point = sft.data_per_point[line_based_indices]

    new_sft = StatefulTractogram.from_sft(streamlines, sft,
                                          data_per_streamline=data_per_streamline,
                                          data_per_point=data_per_point)

    return new_sft, line_based_indices
Example 30
def track_range(t, a, b):
    # True when the length of track t lies strictly between a and b (in mm)
    lt = length(t)
    return a < lt < b
Example 31
def truth_length_max(tracks):
    lmax = length(tracks[0], False)        
    for k in range(len(tracks)):
        if lmax<length(tracks[k],False):
            lmax = length(tracks[k])       
    return lmax
Example 32
def truth_length_avg(tracks):
    s = 0            
    for k in range(len(tracks)):
        s = s + length(tracks[k])
    return s/len(tracks)    
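Equivalently, as a one-liner (a sketch assuming numpy and the same length import):

def truth_length_avg_compact(tracks):
    return np.mean([length(t) for t in tracks])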
Example 33

seeds = utils.seeds_from_mask(white_matter, density=2)
streamline_generator = EuDX(csapeaks.peak_values, csapeaks.peak_indices,
                            odf_vertices=peaks.default_sphere.vertices,
                            a_low=.05, step_sz=.5, seeds=seeds)
affine = streamline_generator.affine
streamlines = list(streamline_generator)

# Keep only longer streamlines so that fewer tracts are visualized
# (for hardware reasons): include only streamlines longer than a
# particular length, here 50 mm.

from dipy.tracking.metrics import length  

longer_streamlines = []
for tract in streamlines:
    if length(tract)>50.0:
        longer_streamlines.append(tract)


# Streamlines visualization

from dipy.viz import fvtk
from dipy.viz.colormap import line_colors

# Make display objects

streamlines_actor = fvtk.line(longer_streamlines, line_colors(longer_streamlines))

# Add display objects to canvas
r = fvtk.ren()
fvtk.add(r, streamlines_actor)
Example 34
def filter_ellipsoid(sft, ellipsoid_radius, ellipsoid_center,
                     filter_type, is_not, is_in_vox=False):
    pre_filtered_streamlines, pre_filtered_indices = \
        pre_filtering_for_geometrical_shape(sft, ellipsoid_radius,
                                            ellipsoid_center, filter_type,
                                            is_in_vox)

    transfo, _, res, _ = sft.space_attributes
    if is_in_vox:
        ellipsoid_center = np.asarray(apply_affine(transfo,
                                                   ellipsoid_center))
    selected_by_ellipsoid = []
    line_based_indices_1 = []
    line_based_indices_2 = []
    # This is still point based (but resampled), I had a ton of problems trying
    # to use something with intersection, but even if I could do it :
    # The result won't be identical to MI-Brain since I am not using the
    # vtkPolydata. Also it won't be identical to TrackVis either,
    # because TrackVis is point-based for Spherical ROI...
    ellipsoid_radius = np.asarray(ellipsoid_radius)
    ellipsoid_center = np.asarray(ellipsoid_center)

    for i, line in enumerate(pre_filtered_streamlines):
        if filter_type == 'any':
            # Resample to 1/10 of the voxel size
            nb_points = max(int(length(line) / np.average(res) * 10), 2)
            line = set_number_of_points(line, nb_points)
            points_in_ellipsoid = np.sum(
                ((line - ellipsoid_center) / ellipsoid_radius) ** 2,
                axis=1)
            if np.argwhere(points_in_ellipsoid <= 1).any():
                # If at least one point was in the ellipsoid, we selected
                # the streamline
                selected_by_ellipsoid.append(pre_filtered_indices[i])
        else:
            points_in_ellipsoid = np.sum(
                ((line[0] - ellipsoid_center) / ellipsoid_radius) ** 2)

            if points_in_ellipsoid <= 1.0:
                line_based_indices_1.append(pre_filtered_indices[i])

            points_in_ellipsoid = np.sum(
                ((line[-1] - ellipsoid_center) / ellipsoid_radius) ** 2)
            if points_in_ellipsoid <= 1.0:
                line_based_indices_2.append(pre_filtered_indices[i])

    # Both endpoints need to be in the mask (AND)
    if filter_type == 'both_ends':
        selected_by_ellipsoid = np.intersect1d(line_based_indices_1,
                                               line_based_indices_2)
    # Only one endpoint needs to be in the mask (OR)
    elif filter_type == 'either_end':
        selected_by_ellipsoid = np.union1d(line_based_indices_1,
                                           line_based_indices_2)
    # If the --not option is used, the selection is inverted
    all_indices = range(len(sft))
    if is_not:
        selected_by_ellipsoid = np.setdiff1d(all_indices,
                                             np.unique(selected_by_ellipsoid))

    # From indices to streamlines
    final_streamlines = list(sft.streamlines[
        np.asarray(selected_by_ellipsoid).astype(np.int32)])

    return final_streamlines, selected_by_ellipsoid
Example 35
def filter_cuboid(sft, cuboid_radius, cuboid_center,
                  filter_type, is_not):

    pre_filtered_streamlines, pre_filtered_indices = \
        pre_filtering_for_geometrical_shape(sft, cuboid_radius,
                                            cuboid_center, filter_type,
                                            False)

    _, _, res, _ = sft.space_attributes

    selected_by_cuboid = []
    line_based_indices_1 = []
    line_based_indices_2 = []
    # Also here I am not using a mathematical intersection and
    # I am not using vtkPolyData like in MI-Brain, so not exactly the same
    cuboid_radius = np.asarray(cuboid_radius)
    cuboid_center = np.asarray(cuboid_center)
    for i, line in enumerate(pre_filtered_streamlines):
        if filter_type == 'any':
            # Resample to 1/10 of the voxel size
            nb_points = max(int(length(line)/np.average(res) * 10), 2)
            line = set_number_of_points(line, nb_points)
            points_in_cuboid = np.abs(line - cuboid_center) / cuboid_radius

            points_in_cuboid[points_in_cuboid <= 1] = 1
            points_in_cuboid[points_in_cuboid > 1] = 0
            points_in_cuboid = np.sum(points_in_cuboid, axis=1)

            if np.argwhere(points_in_cuboid == 3).any():
                # If at least one point was in the cuboid, we selected
                # the streamlines
                selected_by_cuboid.append(pre_filtered_indices[i])
        else:
            # Faster to do it twice than trying to do in using an array of 2
            points_in_cuboid = np.abs(line[0] - cuboid_center) / cuboid_radius
            points_in_cuboid[points_in_cuboid <= 1] = 1
            points_in_cuboid[points_in_cuboid > 1] = 0
            points_in_cuboid = np.sum(points_in_cuboid)

            if points_in_cuboid == 3:
                line_based_indices_1.append(pre_filtered_indices[i])

            points_in_cuboid = np.abs(line[-1] - cuboid_center) / cuboid_radius
            points_in_cuboid[points_in_cuboid <= 1] = 1
            points_in_cuboid[points_in_cuboid > 1] = 0
            points_in_cuboid = np.sum(points_in_cuboid)

            if points_in_cuboid == 3:
                line_based_indices_2.append(pre_filtered_indices[i])

    # Both endpoints need to be in the mask (AND)
    if filter_type == 'both_ends':
        selected_by_cuboid = np.intersect1d(line_based_indices_1,
                                            line_based_indices_2)
    # Only one endpoint need to be in the mask (OR)
    elif filter_type == 'either_end':
        selected_by_cuboid = np.union1d(line_based_indices_1,
                                        line_based_indices_2)

    # If the --not option is used, the selection is inverted
    all_indices = range(len(sft))
    if is_not:
        selected_by_cuboid = np.setdiff1d(all_indices,
                                          np.unique(selected_by_cuboid))

    # From indices to streamlines
    final_streamlines = list(sft.streamlines[
        np.asarray(selected_by_cuboid).astype(np.int32)])

    return final_streamlines, selected_by_cuboid
Example 36
def nii2streamlines(imgfile, maskfile, bvals, bvecs):
    import numpy as np
    import nibabel as nib
    import os

    from dipy.reconst.dti import TensorModel

    img = nib.load(imgfile)
    bvals = np.genfromtxt(bvals)
    bvecs = np.genfromtxt(bvecs)
    if bvecs.shape[1] != 3:
        bvecs = bvecs.T

    from nipype.utils.filemanip import split_filename
    _, prefix, _ = split_filename(imgfile)

    from dipy.data import gradient_table

    gtab = gradient_table(bvals, bvecs)
    data = img.get_data()
    affine = img.get_affine()
    zooms = img.get_header().get_zooms()[:3]
    new_zooms = (2., 2., 2.)
    data2, affine2 = data, affine
    mask = nib.load(maskfile).get_data().astype(np.bool)
    tenmodel = TensorModel(gtab)
    tenfit = tenmodel.fit(data2, mask)

    from dipy.reconst.dti import fractional_anisotropy
    FA = fractional_anisotropy(tenfit.evals)
    FA[np.isnan(FA)] = 0
    fa_img = nib.Nifti1Image(FA, img.get_affine())
    nib.save(fa_img, '%s_tensor_fa.nii.gz' % prefix)

    evecs = tenfit.evecs

    evec_img = nib.Nifti1Image(evecs, img.get_affine())
    nib.save(evec_img, '%s_tensor_evec.nii.gz' % prefix)

    from dipy.data import get_sphere
    sphere = get_sphere('symmetric724')
    from dipy.reconst.dti import quantize_evecs

    peak_indices = quantize_evecs(tenfit.evecs, sphere.vertices)

    from dipy.tracking.eudx import EuDX

    eu = EuDX(FA,
              peak_indices,
              odf_vertices=sphere.vertices,
              a_low=0.2,
              seeds=10**6,
              ang_thr=35)
    tensor_streamlines = [streamline for streamline in eu]

    hdr = nib.trackvis.empty_header()
    hdr['voxel_size'] = new_zooms
    hdr['voxel_order'] = 'LPS'
    hdr['dim'] = data2.shape[:3]

    import dipy.tracking.metrics as dmetrics
    tensor_streamlines = ((sl, None, None) for sl in tensor_streamlines
                          if dmetrics.length(sl) > 15)

    ten_sl_fname = '%s_streamline.trk' % prefix

    nib.trackvis.write(ten_sl_fname,
                       tensor_streamlines,
                       hdr,
                       points_space='voxel')
    return ten_sl_fname
Example 37

    filename_1 = 'data/101_tracks_dti_10K_linear.dpy'
    filename_2 = 'data/104_tracks_dti_10K_linear.dpy'    
    
    prototype_policies = ['random', 'fft', 'sff']
    num_prototypes = 40
    

    print "Loading tracks."
    dpr_1 = Dpy(filename_1, 'r')
    tracks_1_all = dpr_1.read_tracks()
    dpr_1.close()
    
    tracks_1 = []
    for st in tracks_1_all:
        if (length(st)>45):
            tracks_1.append(st)
    
    tracks_1 = np.array(tracks_1, dtype=np.object)
    
    dpr_2 = Dpy(filename_2, 'r')
    tracks_2_all = dpr_2.read_tracks()
    dpr_2.close()
    
    tracks_2 = []
    for st in tracks_2_all:
        if (length(st)>45):
            tracks_2.append(st)
    
    tracks_2 = np.array(tracks_2, dtype=np.object)
    
Example 38
    def loading_full_tractograpy(self, tracpath=None):
        """
        Load the full tractography and create a StreamlineLabeler to
        show it all.
        """
        # load the tracks registered in MNI space
        self.tracpath=tracpath
        basename = os.path.basename(self.tracpath)
        tracks_basename, tracks_format = os.path.splitext(basename)
        
        if tracks_format == '.dpy': 
            
            dpr = Dpy(self.tracpath, 'r')
            print "Loading", self.tracpath
            self.T = dpr.read_tracks()
            dpr.close()
            self.T = np.array(self.T, dtype=np.object)

            
        elif tracks_format == '.trk': 
            streams, self.hdr = nib.trackvis.read(self.tracpath, points_space='voxel')
            print "Loading", self.tracpath
            self.T = np.array([s[0] for s in streams], dtype=np.object)
         

        print "Removing short streamlines"
        self.T = np.array([t for t in self.T if length(t)>= 15],  dtype=np.object)
        
        tracks_directoryname = os.path.dirname(self.tracpath) + '/.temp/'
        general_info_filename = tracks_directoryname + tracks_basename + '.spa'
        
        
        
        
        # Check if there is the .spa file that contains all the
        # computed information from the tractography anyway and try to
        # load it
        try:
            print "Looking for general information file"
            self.load_info(general_info_filename)
                    
        except IOError:
            print "General information not found, recomputing buffers"

            print "Computing buffers."
            self.buffers = compute_buffers(self.T, alpha=1.0, save=False)
            
            print "Computing dissimilarity matrix"
            self.num_prototypes = 40
            self.full_dissimilarity_matrix = compute_dissimilarity(self.T, distance=bundles_distances_mam, prototype_policy='sff', num_prototypes=self.num_prototypes)
            
            # compute initial MBKM with given n_clusters
            print "Computing MBKM"

            size_T = len(self.T)
            if  size_T > 150:
                n_clusters = 150
            else:
                n_clusters = size_T
                
            streamlines_ids = np.arange(size_T, dtype=np.int)
            self.clusters = mbkm_wrapper(self.full_dissimilarity_matrix, n_clusters, streamlines_ids)
            
        
            print "Saving computed information from tractography"
            
            if not os.path.exists(tracks_directoryname):
                os.makedirs(tracks_directoryname)
            self.save_info(general_info_filename)
            
       
        # create the interaction system for tracks, 
        self.streamlab  = StreamlineLabeler('Bundle Picker',
                                            self.buffers, self.clusters,
                                            vol_shape=self.dims, 
                                            affine=np.copy(self.affine),
                                            clustering_parameter=len(self.clusters),
                                            clustering_parameter_max=len(self.clusters),
                                            full_dissimilarity_matrix=self.full_dissimilarity_matrix)
                
        self.scene.add_actor(self.streamlab)
Example 39
print 'after',bvals2.shape,bvecs2.shape


#always check the threshold but 50 should be okay
ten=Tensor(data2,bvals2,bvecs2,thresh=50)

#ten.ind is indices of the eigen directions projected on a sphere
#stopping criteria is FA of .2
eu=EuDX(a=ten.fa(),ind=ten.ind(),seeds=5000,a_low=0.2)

#generate tracks
ten_tracks=[track for track in eu]
print 'No tracks ',len(ten_tracks)

#remove short tracks smaller than 40mm i.e. 20 in native units
ten_tracks=[t for t in ten_tracks if length(t)>20]
print 'No reduced tracks ',len(ten_tracks)

raw_input('Press enter...')


#load the rois
imsk1=nib.load(dname+'/'+froi1)
roi1=imsk1.get_data()
imsk2=nib.load(dname+'/'+froi2)
roi2=imsk2.get_data()

print 'roi dimensions', roi1.shape,roi2.shape
print 'roi voxels', np.sum(roi1==255),np.sum(roi2==255)
#tcs track counts volume
#tes dictionary of tracks passing from voxels
Example 40
def save_id_tract_ext1(tracks_filename, id_file,  distance, out_fname, thres_len= 2.2/3., thres_vol = 1.2 , thres_dis = 2.8/2.):
    
    print thres_len, thres_vol, thres_dis
    if (tracks_filename[-3:]=='dpy'):
        dpr_tracks = Dpy(tracks_filename, 'r')
        all_tracks=dpr_tracks.read_tracks()
        dpr_tracks.close()
    else:
        all_tracks = load_whole_tract_trk(tracks_filename)    
    
    
    tracks_id = load_pickle(id_file)
    	
    tract = [all_tracks[i] for i  in tracks_id]    
    
    not_tract_fil = []
    id_not_tract_fil = []
    min_len = min(len(i) for i in tract)
    #print 'min_len of cst', min_len
    min_len = min_len*thres_len
    
    for i in np.arange(len(all_tracks)):
        if (i not in tracks_id) and (length(all_tracks[i]) > min_len):
            not_tract_fil.append(all_tracks[i])
            id_not_tract_fil.append(i)
       
    k = np.round(len(tract) * thres_vol  )     
            
    from dipy.segment.quickbundles import QuickBundles
    
    qb = QuickBundles(tract,200,18)
    
    medoid_tract = qb.centroids[0]
    
    med_nottract_dm =  distance([medoid_tract], not_tract_fil)
    med_tract_dm =  distance([medoid_tract], tract)
    
    tract_rad = med_tract_dm[0][np.argmax(med_tract_dm[0])]
    len_dis = tract_rad * thres_dis# 2.8/2.
   
    #k_indices which close to the medoid
    sort = np.argsort(med_nottract_dm,axis = 1)[0]
    #print sort[:k+1]
    while (k>0 and med_nottract_dm[0][sort[k]]>=len_dis):
        k = k - 1
        
    
    #print k
    close_indices = sort[0:k]
    
    #for idx in close_indices:
    #    tract_ext.append(not_tract_fil[idx])          
    #print 'close indices', len(close_indices)
    tract_ext_id = []
    for i in tracks_id:
         tract_ext_id.append(i)
    
    #print 'Before', len(tract_ext_id)
    
    for idx in close_indices:
        tract_ext_id.append(id_not_tract_fil[idx]) 
    #    print idx, id_not_tract_fil[idx]
      
    #print 'After', len(tract_ext_id)
    #tract_ext_id = [i for i in tracks_id]
    #tract_ext_id.append(id_not_tract_fil[i] for i in close_indices)
    
    save_pickle(out_fname, tract_ext_id)
    return tract_ext_id
Example 41
def lengths(tracks):    
    return [length(t) for t in tracks]
Example 42
def generate_lengths(fdpy,fnpy):
    dpr=Dpy(fdpy,'r')
    T=dpr.read_tracks()
    dpr.close()
    lenT=[length(t) for t in T]
    np.save(fnpy,np.array(lenT))    
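A brief usage sketch (the file names are hypothetical; assumes a valid .dpy tractography file exists and the imports of the example above, numpy and Dpy, are in scope): write the per-track lengths, then reload them with numpy:

generate_lengths('tracks.dpy', 'tracks_lengths.npy')
lenT = np.load('tracks_lengths.npy')
print(lenT.mean(), lenT.min(), lenT.max())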
Example 43
def filter_cuboid(sft, cuboid_radius, cuboid_center,
                  filter_type, is_exclude):
    """
    Parameters
    ----------
    sft : StatefulTractogram
        StatefulTractogram containing the streamlines to segment.
    cuboid_radius : numpy.ndarray (3)
        Size in mm, x/y/z of the cuboid.
    cuboid_center: numpy.ndarray (3)
        Center x/y/z of the cuboid.
    filter_type: str
        One of the 3 following choices, 'any', 'either_end', 'both_ends'.
    is_exclude: bool
        Value to indicate if the ROI is an AND (false) or a NOT (true).
    Returns
    -------
    new_sft : StatefulTractogram
        Filtered sft.
    line_based_indices : numpy.ndarray
        Ids of the streamlines passing through the mask.
    """
    pre_filtered_sft, pre_filtered_indices = \
        pre_filtering_for_geometrical_shape(sft, cuboid_radius,
                                            cuboid_center, filter_type,
                                            False)
    pre_filtered_sft.to_rasmm()
    pre_filtered_sft.to_center()
    pre_filtered_streamlines = pre_filtered_sft.streamlines
    _, _, res, _ = sft.space_attributes

    selected_by_cuboid = []
    line_based_indices_1 = []
    line_based_indices_2 = []
    # Also here I am not using a mathematical intersection and
    # I am not using vtkPolyData like in MI-Brain, so not exactly the same
    cuboid_radius = np.asarray(cuboid_radius)
    cuboid_center = np.asarray(cuboid_center)
    for i, line in enumerate(pre_filtered_streamlines):
        if filter_type == 'any':
            # Resample to 1/10 of the voxel size
            nb_points = max(int(length(line) / np.average(res) * 10), 2)
            line = set_number_of_points(line, nb_points)
            points_in_cuboid = np.abs(line - cuboid_center) / cuboid_radius
            points_in_cuboid = np.sum(np.where(points_in_cuboid <= 1, 1, 0),
                                      axis=1)

            if np.argwhere(points_in_cuboid == 3).any():
                # If at least one point was in the cuboid in x/y/z,
                # we selected that streamline
                selected_by_cuboid.append(pre_filtered_indices[i])
        else:
            # Faster to do it twice than trying to do in using an array of 2
            points_in_cuboid = np.abs(line[0] - cuboid_center) / cuboid_radius
            points_in_cuboid = np.sum(np.where(points_in_cuboid <= 1, 1, 0))

            if points_in_cuboid == 3:
                line_based_indices_1.append(pre_filtered_indices[i])

            points_in_cuboid = np.abs(line[-1] - cuboid_center) / cuboid_radius
            points_in_cuboid = np.sum(np.where(points_in_cuboid <= 1, 1, 0))

            if points_in_cuboid == 3:
                line_based_indices_2.append(pre_filtered_indices[i])

    # Both endpoints need to be in the mask (AND)
    if filter_type == 'both_ends':
        selected_by_cuboid = np.intersect1d(line_based_indices_1,
                                            line_based_indices_2)
    # Only one endpoint need to be in the mask (OR)
    elif filter_type == 'either_end':
        selected_by_cuboid = np.union1d(line_based_indices_1,
                                        line_based_indices_2)

    # If the 'exclude' option is used, the selection is inverted
    if is_exclude:
        selected_by_cuboid = np.setdiff1d(range(len(sft)),
                                          np.unique(selected_by_cuboid))
    line_based_indices = np.asarray(selected_by_cuboid).astype(np.int32)

    # From indices to sft
    streamlines = sft.streamlines[line_based_indices]
    data_per_streamline = sft.data_per_streamline[line_based_indices]
    data_per_point = sft.data_per_point[line_based_indices]

    new_sft = StatefulTractogram.from_sft(streamlines, sft,
                                          data_per_streamline=data_per_streamline,
                                          data_per_point=data_per_point)

    return new_sft, line_based_indices
Example 44
    def metrics_compute(self, streamlines):
        """
        script to extract metrics
        and save to a tabular file
        spline for spline interpolation
        centre_of_mass
        mean_curvature,
        mean_orientation
        the frenet_serret framework for curvature
        torsion calculations along a streamline.
        https://dipy.org/documentation/1.4.1./reference/dipy.tracking/
        https://dipy.org/documentation/1.1.1./reference/dipy.tracking/
                     

        # from dipy.segment.metric import AveragePointwiseEuclideanMetric
        # from dipy.segment.clustering import QuickBundles
        # # Create the instance of `AveragePointwiseEuclideanMetric` to use.
        # metric = AveragePointwiseEuclideanMetric()
        # qb = QuickBundles(threshold=10., metric=metric)
        # clusters = qb.cluster(streamlines)
        chk:
        https://nipype.readthedocs.io/en/latest/users/examples/dmri_connectivity_advanced.html
        """
        print("extracting metrics")

        metrics_dic = {
            "lengths": list(),
            "average_length": list(),
            "std": list(),
            "mean_curvature": list(),
            "mean_orientation": list(),
            "spline": list(),
            "curvature_scalar": list(),
            "torsion": list()
        }

        # lengths of streamlines
        lengths = [length(s) for s in streamlines]
        lengths = np.array(lengths)

        metrics_dic["lengths"] = lengths.tolist()
        metrics_dic["average_length"] = [
            lengths.mean(),
        ]
        metrics_dic["std"] = [
            lengths.std(),
        ]

        # mean curvature, mean orientation and spline for spline interpolation

        # Per-streamline metrics from dipy.tracking.metrics (assumes
        # `from dipy.tracking import metrics` at module level)
        for streamline in streamlines:
            mc = metrics.mean_curvature(streamline)
            metrics_dic["mean_curvature"].append(mc)

            mo = metrics.mean_orientation(streamline)
            metrics_dic["mean_orientation"].append(mo)

            spl = metrics.spline(streamline)
            metrics_dic["spline"].append(spl)

            # frenet_serret returns (T, N, B, k, t); k is the curvature
            # scalar and t the torsion along the streamline
            _, _, _, k, t = metrics.frenet_serret(streamline)
            metrics_dic["curvature_scalar"].append(k)
            metrics_dic["torsion"].append(t)

        return metrics_dic
Example 45
    data = img.get_data()
    affine = img.get_affine()
    print len(data)
    
    #load the tracks
    tracks_filename = 'ALS_Data/'+str(subj)+'/DIFF2DEPI_EKJ_64dirs_14/DTI/tracks_dti_'+str(num_seeds)+'M_linear.dpy'    
    dpr_tracks = Dpy(tracks_filename, 'r')
    T = dpr_tracks.read_tracks()
    dpr_tracks.close()    
        
    #print len(T)
    print len(T)   
   
    T = T[:200]
        
    T = [t for t in T if length(t)>= 15]

#    T = [downsample(t, 18) - np.array(data.shape[:3]) / 2. for t in T]
#    axis = np.array([1, 0, 0])
#    theta = - 90. 
#    T = np.dot(T,rotation_matrix(axis, theta))
#    axis = np.array([0, 1, 0])
#    theta = 180. 
#    T = np.dot(T, rotation_matrix(axis, theta))
#    
    #load initial QuickBundles with threshold 30mm
    #fpkl = dname+'data/subj_05/101_32/DTI/qb_gqi_1M_linear_30.pkl'
    #qb=QuickBundles(T, 10., 18)
    #save_pickle(fpkl,qb)
    #qb=load_pickle(fpkl)
Example 46

    filename_1 = 'data/101_tracks_dti_10K_linear.dpy'
    filename_2 = 'data/104_tracks_dti_10K_linear.dpy'    
    
    prototype_policies = ['random', 'fft', 'sff']
    num_prototypes = 10
    

    print "Loading tracks."
    dpr_1 = Dpy(filename_1, 'r')
    tracks_1_all = dpr_1.read_tracks()
    dpr_1.close()
    
    tracks_1 = []
    for st in tracks_1_all:
        if (length(st)>20):
            tracks_1.append(st)
    
    tracks_1 = np.array(tracks_1, dtype=np.object)
    
    dpr_2 = Dpy(filename_2, 'r')
    tracks_2_all = dpr_2.read_tracks()
    dpr_2.close()
    
    tracks_2 = []
    for st in tracks_2_all:
        if (length(st)>20):
            tracks_2.append(st)
    
    tracks_2 = np.array(tracks_2, dtype=np.object)