Example #1
def bench_load_trk():
    rng = np.random.RandomState(42)
    dtype = 'float32'
    NB_STREAMLINES = 5000
    NB_POINTS = 1000
    points = [rng.rand(NB_POINTS, 3).astype(dtype)
              for i in range(NB_STREAMLINES)]
    scalars = [rng.rand(NB_POINTS, 10).astype(dtype)
               for i in range(NB_STREAMLINES)]

    repeat = 10

    with InTemporaryDirectory():
        trk_file = "tmp.trk"
        tractogram = Tractogram(points, affine_to_rasmm=np.eye(4))
        TrkFile(tractogram).save(trk_file)

        streamlines_old = [d[0] - 0.5
                           for d in tv.read(trk_file, points_space="rasmm")[0]]
        mtime_old = measure('tv.read(trk_file, points_space="rasmm")', repeat)
        print("Old: Loaded {:,} streamlines in {:6.2f}".format(NB_STREAMLINES,
                                                               mtime_old))

        trk = nib.streamlines.load(trk_file, lazy_load=False)
        streamlines_new = trk.streamlines
        mtime_new = measure('nib.streamlines.load(trk_file, lazy_load=False)',
                            repeat)
        print("\nNew: Loaded {:,} streamlines in {:6.2}".format(NB_STREAMLINES,
                                                                mtime_new))
        print("Speedup of {:.2f}".format(mtime_old / mtime_new))
        for s1, s2 in zip(streamlines_new, streamlines_old):
            assert_array_equal(s1, s2)

    # Points and scalars
    with InTemporaryDirectory():

        trk_file = "tmp.trk"
        tractogram = Tractogram(points,
                                data_per_point={'scalars': scalars},
                                affine_to_rasmm=np.eye(4))
        TrkFile(tractogram).save(trk_file)

        streamlines_old = [d[0] - 0.5
                           for d in tv.read(trk_file, points_space="rasmm")[0]]

        scalars_old = [d[1]
                       for d in tv.read(trk_file, points_space="rasmm")[0]]
        mtime_old = measure('tv.read(trk_file, points_space="rasmm")', repeat)
        msg = "Old: Loaded {:,} streamlines with scalars in {:6.2f}"
        print(msg.format(NB_STREAMLINES, mtime_old))

        trk = nib.streamlines.load(trk_file, lazy_load=False)
        scalars_new = trk.tractogram.data_per_point['scalars']
        mtime_new = measure('nib.streamlines.load(trk_file, lazy_load=False)',
                            repeat)
        msg = "New: Loaded {:,} streamlines with scalars in {:6.2f}"
        print(msg.format(NB_STREAMLINES, mtime_new))
        print("Speedup of {:2f}".format(mtime_old / mtime_new))
        for s1, s2 in zip(scalars_new, scalars_old):
            assert_array_equal(s1, s2)
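
For context, bench_load_trk (repeated verbatim in Example #2) relies on module-level imports that the listing does not show. A plausible sketch, hedged because the source header is missing:

# Assumed imports for bench_load_trk (not shown in the listing above):
import numpy as np
import nibabel as nib
from nibabel import trackvis as tv          # legacy API, removed in nibabel 4.0
from nibabel.streamlines import Tractogram, TrkFile
from nibabel.tmpdirs import InTemporaryDirectory
from numpy.testing import assert_array_equal
from numpy.testing import measure           # not available in recent numpy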
Example #2
def bench_load_trk():
    rng = np.random.RandomState(42)
    dtype = 'float32'
    NB_STREAMLINES = 5000
    NB_POINTS = 1000
    points = [rng.rand(NB_POINTS, 3).astype(dtype)
              for i in range(NB_STREAMLINES)]
    scalars = [rng.rand(NB_POINTS, 10).astype(dtype)
               for i in range(NB_STREAMLINES)]

    repeat = 10

    with InTemporaryDirectory():
        trk_file = "tmp.trk"
        tractogram = Tractogram(points, affine_to_rasmm=np.eye(4))
        TrkFile(tractogram).save(trk_file)

        streamlines_old = [d[0] - 0.5
                           for d in tv.read(trk_file, points_space="rasmm")[0]]
        mtime_old = measure('tv.read(trk_file, points_space="rasmm")', repeat)
        print("Old: Loaded {:,} streamlines in {:6.2f}".format(NB_STREAMLINES,
                                                               mtime_old))

        trk = nib.streamlines.load(trk_file, lazy_load=False)
        streamlines_new = trk.streamlines
        mtime_new = measure('nib.streamlines.load(trk_file, lazy_load=False)',
                            repeat)
        print("\nNew: Loaded {:,} streamlines in {:6.2}".format(NB_STREAMLINES,
                                                                mtime_new))
        print("Speedup of {:.2f}".format(mtime_old / mtime_new))
        for s1, s2 in zip(streamlines_new, streamlines_old):
            assert_array_equal(s1, s2)

    # Points and scalars
    with InTemporaryDirectory():

        trk_file = "tmp.trk"
        tractogram = Tractogram(points,
                                data_per_point={'scalars': scalars},
                                affine_to_rasmm=np.eye(4))
        TrkFile(tractogram).save(trk_file)

        streamlines_old = [d[0] - 0.5
                           for d in tv.read(trk_file, points_space="rasmm")[0]]

        scalars_old = [d[1]
                       for d in tv.read(trk_file, points_space="rasmm")[0]]
        mtime_old = measure('tv.read(trk_file, points_space="rasmm")', repeat)
        msg = "Old: Loaded {:,} streamlines with scalars in {:6.2f}"
        print(msg.format(NB_STREAMLINES, mtime_old))

        trk = nib.streamlines.load(trk_file, lazy_load=False)
        scalars_new = trk.tractogram.data_per_point['scalars']
        mtime_new = measure('nib.streamlines.load(trk_file, lazy_load=False)',
                            repeat)
        msg = "New: Loaded {:,} streamlines with scalars in {:6.2f}"
        print(msg.format(NB_STREAMLINES, mtime_new))
        print("Speedup of {:2f}".format(mtime_old / mtime_new))
        for s1, s2 in zip(scalars_new, scalars_old):
            assert_array_equal(s1, s2)
Example #3
    def _run_interface(self, runtime):
        tracks, header = trk.read(self.inputs.in_file)
        if not isdefined(self.inputs.data_dims):
            data_dims = header['dim']
        else:
            data_dims = self.inputs.data_dims

        if not isdefined(self.inputs.voxel_dims):
            voxel_size = header['voxel_size']
        else:
            voxel_size = self.inputs.voxel_dims

        affine = header['vox_to_ras']

        streams = ((ii[0]) for ii in tracks)
        data = density_map(streams, data_dims, voxel_size)
        if data.max() < 2**15:
            data = data.astype('int16')

        img = nb.Nifti1Image(data, affine)
        out_file = op.abspath(self.inputs.out_filename)
        nb.save(img, out_file)
        iflogger.info('Track density map saved as {i}'.format(i=out_file))
        iflogger.info('Data Dimensions {d}'.format(d=data_dims))
        iflogger.info('Voxel Dimensions {v}'.format(v=voxel_size))
        return runtime
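
For reference, a standalone sketch of the same density-map computation outside the Nipype interface. The file names are hypothetical, and the positional signature density_map(streamlines, vol_dims, voxel_size) follows the call above (newer dipy releases expect an affine instead):

import nibabel as nib
import nibabel.trackvis as trk
from dipy.tracking.utils import density_map

tracks, header = trk.read('tracks.trk')   # hypothetical input file
streams = (t[0] for t in tracks)          # keep only the point arrays
data = density_map(streams, header['dim'], header['voxel_size'])
img = nib.Nifti1Image(data.astype('int16'), header['vox_to_ras'])
nib.save(img, 'tdi.nii.gz')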
Example #4
File: io.py Project: qytian/osmosis
def fg_from_trk(trk_file, affine=None):
    """
    Read data from a trackvis .trk file and create a FiberGroup object
    according to the information in it.
    """

    # Generate right away, since we're going to do it anyway:
    read_trk = tv.read(trk_file, as_generator=False)
    fibers_trk = read_trk[0]

    # By default, read the affine from the file header:
    if affine is not None:
        aff = affine
    else:
        hdr = read_trk[1]
        aff = tv.aff_from_hdr(hdr)
        # If the header contains a bogus affine, we revert to np.eye(4), so we
        # don't get into trouble later:
        try:
            np.matrix(aff).getI()
        except np.linalg.LinAlgError:
            e_s = "trk file contains bogus header, reverting to np.eye(4)" 
            warnings.warn(e_s)
            aff = np.eye(4)

    fibers = []
    for f in fibers_trk:
        fibers.append(ozf.Fiber(np.array(f[0]).T, affine=aff))

    return ozf.FiberGroup(fibers, affine=aff)
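
The np.matrix(aff).getI() probe above still works, but np.matrix is deprecated; an equivalent invertibility check with plain arrays, as a sketch:

import numpy as np

try:
    np.linalg.inv(aff)     # raises LinAlgError if the affine is singular
except np.linalg.LinAlgError:
    aff = np.eye(4)        # same fallback as above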
Example #5
def tractography_from_trackvis_file(filename):
    tracts_and_data, header = trackvis.read(filename, points_space='rasmm')

    tracts, scalars, properties = list(zip(*tracts_and_data))

    scalar_names = [n for n in header['scalar_name'] if len(n) > 0]

    #scalar_names_unique = []
    #scalar_names_subcomp = {}
    # for sn in scalar_names:
    #    if re.match('.*_[0-9]{2}', sn):
    #        prefix = sn[:sn.rfind('_')]
    #        if prefix not in scalar_names_unique:
    #            scalar_names_unique.append(prefix)
    #            scalar_names_subcomp[prefix] = int(sn[-2:])
    #        scalar_names_subcomp[prefix] = max(sn[-2:], scalar_names_subcomp[prefix])
    #    else:
    #        scalar_names_unique.append(sn)

    tracts_data = {}
    for i, sn in enumerate(scalar_names):
        if hasattr(sn, 'decode'):
            sn = sn.decode()
        tracts_data[sn] = [scalar[:, i][:, None] for scalar in scalars]

    affine = header['vox_to_ras']
    image_dims = header['dim']

    tr = Tractography(
        tracts, tracts_data,
        affine=affine, image_dims=image_dims
    )

    return tr
Example #6
File: pdb.py Project: arokem/pdb
def fg_from_trk(trk_file, affine=None):
    """
    Read data from a trackvis .trk file and create a FiberGroup object
    according to the information in it.
    """

    # Generate right away, since we're going to do it anyway:
    read_trk = tv.read(trk_file, as_generator=False)
    fibers_trk = read_trk[0]

    # By default, read the affine from the file header:
    if affine is not None:
        aff = affine
    else:
        hdr = read_trk[1]
        aff = tv.aff_from_hdr(hdr)
        # If the header contains a bogus affine, we revert to np.eye(4), so we
        # don't get into trouble later:
        try:
            np.matrix(aff).getI()
        except np.linalg.LinAlgError:
            e_s = "trk file contains bogus header, reverting to np.eye(4)"
            warnings.warn(e_s)
            aff = np.eye(4)

    fibers = []
    for f in fibers_trk:
        fibers.append(ozf.Fiber(np.array(f[0]).T, affine=aff))

    return ozf.FiberGroup(fibers, affine=aff)
Example #7
def use_camino(mask, maskname='maskA', iteration='20'):

    img = nib.Nifti1Image(mask.astype(np.uint8),
                          np.array([[1, 0, 0, -31.5],
                                    [0, 1, 0, -31.5],
                                    [0, 0, 1, -32.5],
                                    [0, 0, 0, 1]]))
    nib.save(img, '/tmp/' + maskname + '.nii.gz')

    cmd='track -inputfile /home/eg309/Data/orbital_phantoms/dwi_dir/workflow/tractography/_subject_id_subject1/picopdfs_twoten/data_fit_pdfs.Bdouble -seedfile /tmp/'+maskname+'.nii.gz -iterations '+iteration+' -numpds 2  -inputmodel pico -outputfile data_fit_pdfs_tracked'
    pipe(cmd)
    cmd='camino_to_trackvis -i ./data_fit_pdfs_tracked -o data_fit_pdfs_tracked.trk -l 30 -d 64,64,64 -x 1.0,1.0,1.0 --voxel-order LAS'
    pipe(cmd)

    fname = 'data_fit_pdfs_tracked.trk'
    streams, hdr = tv.read(fname, points_space=None)
    tracks = [s[0] for s in streams]

    shape = (64, 64, 64)
    msk = np.load('/tmp/allmasks.npy')
    #"""
    tracks = [t + np.array([32., 32., 32.]) for t in tracks]

    tracksA = count_tracks_mask(tracks, shape, msk, 1)
    tracksB = count_tracks_mask(tracks, shape, msk, 2)
    tracksC = count_tracks_mask(tracks, shape, msk, 3)
    tracksD = count_tracks_mask(tracks, shape, msk, 4)

    print('Initial', 'A', 'B', 'C', 'D')
    print(len(tracks), len(tracksA), len(tracksB), len(tracksC), len(tracksD))
    #"""
    #show_tracks(tracks)
    alpha = 1.
    lw = 2.
    bg = (1., 1., 1., 1)

    """
    def launch(self, data_file, region_volume=None):
        datatype = self._base_before_launch(data_file, region_volume)

        # note the streaming parsing, we do not load the dataset in memory at once
        tract_gen, hdr = trackvis.read(data_file, as_generator=True)

        vox2ras = _SpaceTransform(hdr)
        tract_start_indices = [0]
        tract_region = []

        # we process tracts in bigger chunks to optimize disk write costs
        for tract_bundle in chunk_iter(tract_gen, self.READ_CHUNK):
            tract_bundle = [tr[0] for tr in tract_bundle]

            for tr in tract_bundle:
                tract_start_indices.append(tract_start_indices[-1] + len(tr))
                if region_volume is not None:
                    tract_region.append(self._get_tract_region(tr[0]))

            vertices = numpy.concatenate(tract_bundle) # in voxel space
            vertices = vox2ras.transform(vertices)

            datatype.store_data_chunk("vertices", vertices, grow_dimension=0, close_file=False)

        datatype.tract_start_idx = tract_start_indices
        datatype.tract_region = numpy.array(tract_region, dtype=numpy.int16)
        return datatype
Example #9
def load_tractography(filename_tractography):
    tractography, header = trackvis.read(filename_tractography)
    tractography = [streamline[0] for streamline in tractography]
    return tractography
Example #10
def get_data(tom_fn, seed_fn, out_fn, mean, sdev):
    # Load TOM volume and normalise
    tom = nib.load(tom_fn).get_data()  # 144 x 144 x 144 x 3
    tom = (tom - mean) / sdev  # normalise based on dataset mean/stdev

    # Convert to torch and reshape
    tom = torch.from_numpy(np.float32(tom))
    tom = tom.permute(3, 0, 1, 2)  # channels first for pytorch

    # On-the-fly augmentation
    noise_stdev = torch.rand(1) * 0.05
    noise = torch.normal(mean=torch.zeros(tom.size()),
                         std=torch.ones(tom.size()) * noise_stdev)
    tom += noise

    # Load the seed volume
    seed_vol = nib.load(seed_fn).get_data()
    seed_vol = torch.from_numpy(np.float32(seed_vol))
    seed_vol = seed_vol.permute(3, 0, 1, 2)  # channels first for pytorch

    # Concatenate the seed volume as an extra channel of the first dimension of the TOM volume
    tom_seed = torch.cat((tom, seed_vol), dim=0)

    # Load the tractogram
    streamlines, header = trackvis.read(out_fn)
    streamlines = [s[0] for s in streamlines]
    streamlines = np.array(streamlines)

    # Get seed coordinates and convert streamlines to relative format
    seeds = [sl[0].copy() for sl in streamlines]
    #for i in range(len(streamlines)):
    #    streamlines[i] -= seeds[i]

    # Sort seeds and streamlines by seed points x, then y, then z
    streamlines = list(streamlines)
    streamlines = [
        x for _, x in sorted(
            zip(seeds, streamlines),
            key=lambda pair: [pair[0][0], pair[0][1], pair[0][2]])
    ]
    #seeds = sorted(seeds, key=lambda k: [k[0], k[1], k[2]])

    # automatically converts list to numpy array and reshapes it
    # (num_sl, points_per_sl, 3) -> (sqrt(num_sl), sqrt(num_sl), points_per_sl*3)
    # Performed in 2 successive steps because I don't know if it works if I do it in one step
    streamlines = np.reshape(streamlines,
                             (int(num_streamlines**(1 / 2)),
                              int(num_streamlines**(1 / 2)), num_points, 3))
    streamlines = np.reshape(streamlines,
                             (int(num_streamlines**(1 / 2)),
                              int(num_streamlines**(1 / 2)), num_points * 3))
    tractogram = torch.from_numpy(streamlines)
    tractogram = tractogram.permute(2, 0, 1)  # channels first for pytorch

    # automatically converts list to numpy array and reshapes it
    #seeds = np.reshape(seeds, (int(num_streamlines**(1/2)), int(num_streamlines**(1/2)), 3))
    #seeds = torch.from_numpy(seeds)
    #seeds = seeds.permute(2, 0, 1)

    return [tom_seed, [tractogram, tom_seed]]
Example #11
    def fibers(self):
        def fibers_generator(fibers):
            for rec in fibers:
                yield rec[0]

        data, hdr = trackvis.read(self._filename, as_generator=True,
                                  points_space='voxel')
        return fibers_generator(data)
Example #12
    def launch(self, data_file, region_volume=None):
        datatype = self._base_before_launch(data_file, region_volume)

        # note the streaming parsing, we do not load the dataset in memory at once
        tract_gen, hdr = trackvis.read(data_file, as_generator=True)

        vox2ras = _SpaceTransform(hdr)
        tract_start_indices = [0]
        tract_region = []

        # we process tracts in bigger chunks to optimize disk write costs
        for tract_bundle in chunk_iter(tract_gen, self.READ_CHUNK):
            tract_bundle = [tr[0] for tr in tract_bundle]

            for tr in tract_bundle:
                tract_start_indices.append(tract_start_indices[-1] + len(tr))
                if region_volume is not None:
                    tract_region.append(self._get_tract_region(tr[0]))

            vertices = numpy.concatenate(tract_bundle)  # in voxel space
            vertices = vox2ras.transform(vertices)

            datatype.store_data_chunk("vertices",
                                      vertices,
                                      grow_dimension=0,
                                      close_file=False)

        datatype.tract_start_idx = tract_start_indices
        datatype.tract_region = numpy.array(tract_region, dtype=numpy.int16)
        return datatype
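
chunk_iter is a project helper not shown in Examples #8 and #12. A minimal sketch of what it plausibly does (batch a generator into lists of at most chunk_size items):

from itertools import islice

def chunk_iter(gen, chunk_size):
    """Yield successive lists of up to chunk_size items drawn from gen."""
    while True:
        chunk = list(islice(gen, chunk_size))
        if not chunk:
            return
        yield chunk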
Example #13
    def _run_interface(self, runtime):
        tracks, header = trk.read(self.inputs.in_file)
        if not isdefined(self.inputs.data_dims):
            data_dims = header['dim']
        else:
            data_dims = self.inputs.data_dims

        if not isdefined(self.inputs.voxel_dims):
            voxel_size = header['voxel_size']
        else:
            voxel_size = self.inputs.voxel_dims

        affine = header['vox_to_ras']

        streams = ((ii[0]) for ii in tracks)
        data = density_map(streams, data_dims, voxel_size)
        if data.max() < 2**15:
            data = data.astype('int16')

        img = nb.Nifti1Image(data, affine)
        out_file = op.abspath(self.inputs.out_filename)
        nb.save(img, out_file)
        iflogger.info('Track density map saved as {i}'.format(i=out_file))
        iflogger.info('Data Dimensions {d}'.format(d=data_dims))
        iflogger.info('Voxel Dimensions {v}'.format(v=voxel_size))
        return runtime
Example #14
def tractography_from_trackvis_file(filename):
    tracts_and_data, header = trackvis.read(filename, points_space='rasmm')

    tracts, scalars, properties = list(zip(*tracts_and_data))

    scalar_names = [n for n in header['scalar_name'] if len(n) > 0]

    #scalar_names_unique = []
    #scalar_names_subcomp = {}
    # for sn in scalar_names:
    #    if re.match('.*_[0-9]{2}', sn):
    #        prefix = sn[:sn.rfind('_')]
    #        if prefix not in scalar_names_unique:
    #            scalar_names_unique.append(prefix)
    #            scalar_names_subcomp[prefix] = int(sn[-2:])
    #        scalar_names_subcomp[prefix] = max(sn[-2:], scalar_names_subcomp[prefix])
    #    else:
    #        scalar_names_unique.append(sn)

    tracts_data = {}
    for i, sn in enumerate(scalar_names):
        tracts_data[sn] = [scalar[:, i][:, None] for scalar in scalars]

    affine = header['vox_to_ras']
    image_dims = header['dim']

    tr = Tractography(tracts,
                      tracts_data,
                      affine=affine,
                      image_dims=image_dims)

    return tr
Example #15
def get_streamlines_from_trk(trkFile):
    """
    Extracts streamlines from a .trk file.
    Returns a list of streamlines.
    """
    streams, hdr = tv.read(trkFile)
    streamlines = [i[0] for i in streams]
    return streamlines
Example #16
def get_streamlines():
    from nibabel import trackvis as tv
    from dipy.data import get_data

    fname = get_data('fornix')
    streams, hdr = tv.read(fname)
    streamlines = [i[0] for i in streams]
    return streamlines
Example #17
def get_streamlines():
    from nibabel import trackvis as tv
    from dipy.data import get_data

    fname = get_data('fornix')
    streams, hdr = tv.read(fname)
    streamlines = [i[0] for i in streams]
    return streamlines
Example #18
def show(fname):

    streams, hdr = tv.read(fname)
    streamlines = [s[0] for s in streams]

    renderer = fvtk.ren() 
    fvtk_tubes = vtk_a.line(streamlines, opacity=0.2, linewidth=5)
    fvtk.add(renderer, fvtk_tubes)
    fvtk.show(renderer)
Example #19
def loadtrkfile(T_filename, threshold_short_streamlines=10.0):
    """Load a tractogram from a TRK file (the length threshold is accepted
    but not applied here).
    """
    print("Loading %s" % T_filename)
    T, hdr = trackvis.read(T_filename, as_generator=False)
    T = np.array([s[0] for s in T], dtype=object)

    return T, hdr
Example #20
def bench_compress_streamlines():
    repeat = 10
    fname = get_data('fornix')
    streams, hdr = tv.read(fname)
    streamlines = [i[0] for i in streams]

    print("Timing compress_streamlines() in Cython ({0} streamlines)".format(len(streamlines)))
    cython_time = measure("compress_streamlines(streamlines)", repeat)
    print("Cython time: {0:.3}sec".format(cython_time))
    del streamlines

    fname = get_data('fornix')
    streams, hdr = tv.read(fname)
    streamlines = [i[0] for i in streams]
    python_time = measure("list(map(compress_streamlines_python, streamlines))",
                          repeat)
    print("Python time: {0:.2}sec".format(python_time))
    print("Speed up of {0}x".format(python_time / cython_time))
    del streamlines
Example #21
def load_trk(file):
    '''
    load the streamlines data (.trk)
    :param file: data path
    :return: streamlines
    '''
    streams, hdr = trackvis.read(file, points_space="rasmm")
    # streamlines = [s[0] for s in streams]
    return streams, hdr
Example #22
def tract_to_binary(file_in: str, file_out: str, ref_img_path: str):
    HOLE_CLOSING = 0

    # choose from "trk" or "trk_legacy"
    #  Use "trk_legacy" for zenodo dataset v1.1.0 and below
    #  Use "trk" for zenodo dataset v1.2.0
    tracking_format = "trk"

    ref_img = nib.load(ref_img_path)
    ref_affine = ref_img.get_affine()
    ref_shape = ref_img.get_data().shape

    if tracking_format == "trk_legacy":
        streams, hdr = trackvis.read(file_in)
        streamlines = [s[0] for s in streams]  # list of 2d ndarrays
    else:
        sl_file = nib.streamlines.load(file_in)
        streamlines = sl_file.streamlines

    # Upsample Streamlines (very important, especially when using DensityMap Threshold. Without upsampling eroded results)
    max_seq_len = abs(ref_affine[0, 0] / 4)
    streamlines = list(utils_trk.subsegment(streamlines, max_seq_len))

    # Remember: a fiber does not count if it has no node inside a voxel -> upsampling helps, but is not perfect
    # Counts the number of unique streamlines that pass through each voxel -> oversampling does not distort the result
    dm = utils_trk.density_map(streamlines, ref_affine, ref_shape)

    # Create Binary Map
    dm_binary = dm > 0  # Using higher Threshold problematic, because tends to remove valid parts (sparse fibers)
    dm_binary_c = dm_binary

    # Filter Blobs (might remove valid parts) -> do not use
    # dm_binary_c = remove_small_blobs(dm_binary_c, threshold=10)

    # Closing of Holes (not ideal because tends to remove valid holes, e.g. in MCP) -> do not use
    # size = 1
    # dm_binary_c = ndimage.binary_closing(dm_binary_c, structure=np.ones((size, size, size))).astype(dm_binary.dtype)

    # Save Binary Mask
    dm_binary_img = nib.Nifti1Image(dm_binary_c.astype("uint8"), ref_affine)
    nib.save(dm_binary_img, file_out)
Example #23
def filter_fibers(intrk,
                  outtrk='',
                  fiber_cutoff_lower=20,
                  fiber_cutoff_upper=500):
    """Filters a tractogram based on lower / upper cutoffs.

    Parameters
    ----------
    intrk : TRK file
        Path to a tractogram file in TRK format

    outtrk : TRK file
        Output path for the filtered tractogram

    fiber_cutoff_lower : int
        Lower number of fibers cutoff (Default: 20)

    fiber_cutoff_upper : int
        Upper number of fibers cutoff (Default: 500)
    """
    print("Cut Fiber Filtering")
    print("===================")

    print("Input file for fiber cutting is: %s" % intrk)

    if outtrk == '':
        _, filename = os.path.split(intrk)
        base, ext = os.path.splitext(filename)
        outtrk = os.path.abspath(base + '_cutfiltered' + ext)

    # compute length array
    le = compute_length_array(intrk)

    # cut the fibers smaller than value
    reducedidx = np.where((le > fiber_cutoff_lower)
                          & (le < fiber_cutoff_upper))[0]

    # load trackfile (downside, needs everything in memory)
    fibold, hdrold = tv.read(intrk)

    # rewrite the track vis file with the reduced number of fibers
    outstreams = []
    for i in reducedidx:
        outstreams.append(fibold[i])

    n_fib_out = len(outstreams)
    hdrnew = hdrold.copy()
    hdrnew['n_count'] = n_fib_out

    # print("Compute length array for cutted fibers")
    # le = compute_length_array(streams=outstreams)
    print("Write out file: %s" % outtrk)
    print("Number of fibers out : %d" % hdrnew['n_count'])
    tv.write(outtrk, outstreams, hdrnew)
    print("File wrote : %d" % os.path.exists(outtrk))
Example #24
    def load(self, T_filename, threshold_short_streamlines=10.0):
        """Load a tractogram from a TRK file and keep only the streamlines
        whose maximum score in AFTract.pickle is positive.
        """
        print("Loading %s" % T_filename)
        T, hdr = trackvis.read(T_filename, as_generator=False)
        T = np.array([s[0] for s in T], dtype=object)

        print("Before")
        print("%s: %s streamlines" % (T_filename, len(T)))

        with open("AFTract.pickle", "rb") as pickle_in:
            st = pickle.load(pickle_in)

        # Indices of streamlines whose maximum score is positive
        tr = []
        for l in range(len(st)):
            m, k = max((v, i) for i, v in enumerate(st[l]))
            if m > 0:
                tr.append(l)

        # Delete every streamline that is *not* in tr
        tk = [o for o in range(len(T)) if o not in tr]
        T2 = np.delete(T, tk)

        print("After")
        print("%s: %s streamlines" % (T_filename, len(T2)))

        return T, T2, hdr
Example #25
    def _run_interface(self, runtime):
        # Loading the ROI file
        import nibabel as nib
        import numpy as np
        from dipy.tracking import utils

        img = nib.load(self.inputs.ROI_file)
        data = img.get_data()
        affine = img.get_affine()

        # Getting the FA file
        img = nib.load(self.inputs.FA_file)
        FA_data = img.get_data()
        FA_affine = img.get_affine()

        # Loading the streamlines
        from nibabel import trackvis
        streams, hdr = trackvis.read(self.inputs.trackfile, points_space='rasmm')
        streamlines = [s[0] for s in streams]
        streamlines_affine = trackvis.aff_from_hdr(hdr, atleast_v2=True)

        # Checking for negative values
        from dipy.tracking._utils import _mapping_to_voxel, _to_voxel_coordinates
        endpoints = [sl[0::len(sl) - 1] for sl in streamlines]
        lin_T, offset = _mapping_to_voxel(affine, (1., 1., 1.))
        inds = np.dot(endpoints, lin_T)
        inds += offset
        negative_values = np.where(inds < 0)[0]
        for negative_value in sorted(negative_values, reverse=True):
            del streamlines[negative_value]

        # Constructing the streamlines matrix
        matrix, mapping = utils.connectivity_matrix(
            streamlines=streamlines, label_volume=data,
            affine=streamlines_affine, symmetric=True,
            return_mapping=True, mapping_as_streamlines=True)
        matrix[matrix < 10] = 0

        # Constructing the FA matrix
        dimensions = matrix.shape
        FA_matrix = np.empty(shape=dimensions)

        for i in range(0, dimensions[0]):
            for j in range(0, dimensions[1]):
                if matrix[i, j]:
                    dm = utils.density_map(mapping[i, j], FA_data.shape,
                                           affine=streamlines_affine)
                    FA_matrix[i, j] = np.mean(FA_data[dm > 0])
                else:
                    FA_matrix[i, j] = 0

        FA_matrix[np.tril_indices(n=len(FA_matrix))] = 0
        FA_matrix = FA_matrix.T + FA_matrix - np.diagonal(FA_matrix)

        from nipype.utils.filemanip import split_filename
        _, base, _ = split_filename(self.inputs.trackfile)
        np.savetxt(base + '_FA_matrix.txt', FA_matrix, delimiter='\t')
        return runtime
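
The symmetrisation idiom at the end works only because the lower triangle (diagonal included) was just zeroed, which makes the np.diagonal subtraction a no-op. A more explicit sketch of the same step:

import numpy as np

upper = np.triu(FA_matrix, k=1)   # keep the strict upper triangle
FA_matrix = upper + upper.T       # mirror it; the diagonal stays zero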
Example #26
def bench_compress_streamlines():
    repeat = 10
    fname = get_fnames('fornix')
    streams, hdr = tv.read(fname)
    streamlines = [i[0] for i in streams]

    print("Timing compress_streamlines() in Cython"
          " ({0} streamlines)".format(len(streamlines)))
    cython_time = measure("compress_streamlines(streamlines)", repeat)
    print("Cython time: {0:.3}sec".format(cython_time))
    del streamlines

    fname = get_fnames('fornix')
    streams, hdr = tv.read(fname)
    streamlines = [i[0] for i in streams]
    python_time = measure("list(map(compress_streamlines_python, streamlines))",
                          repeat)
    print("Python time: {0:.2}sec".format(python_time))
    print("Speed up of {0}x".format(python_time / cython_time))
    del streamlines
Example #27
def voxelCount():
    tractography, header = trackvis.read(T_A_filename)

    tractography = [streamline[0] for streamline in tractography]

    affine = utils.affine_for_trackvis(voxel_size=np.array([2, 2, 2]))

    print("---Number of voxel---")

    print(len(streamline_mapping(T_A, affine=affine).keys()))
    tkinter.messagebox.showinfo(
        "Voxel", len(streamline_mapping(T_A, affine=affine).keys()))
Example #28
    def __init__(self, filename):
        """Load the TrackVis file """
        self._filename = filename

        self.tracks, self.fiberHeader = niv.read(filename)
        self.fiberCount = self.fiberHeader['n_count'].ravel()[0]

        print "number of fibers ->", self.fiberCount
        self.currentFiber = 0
        self.shape = tuple(self.fiberHeader['dim'])

        # Position at first fiber
        self._rewind()
Example #29
    def __init__(self, filename):
        """Load the TrackVis file """
        self._filename = filename

        self.tracks, self.fiberHeader = niv.read(filename)
        self.fiberCount = self.fiberHeader['n_count'].ravel()[0]
        
        print "number of fibers ->", self.fiberCount
        self.currentFiber = 0
        self.shape = tuple(self.fiberHeader['dim'])

        # Position at first fiber
        self._rewind()
Example #30
def get_data(in_fn, out_fn, mean, sdev):
    # Load TOM volume and preprocess
    tom = nib.load(in_fn).get_data()  # 144 x 144 x 144 x 3
    tom = (tom - mean) / sdev  # normalise based on dataset mean/stdev
    tom = torch.from_numpy(np.float32(tom))
    tom = tom.permute(3, 0, 1, 2)  # channels first for pytorch

    # Load the tractogram
    streamlines, header = trackvis.read(out_fn)
    streamlines = [s[0] for s in streamlines]

    # Preprocess the streamlines
    streamlines = set_number_of_points(streamlines, num_points)
    streamlines = np.array(streamlines)
    if len(streamlines) < num_streamlines:
        temp_streamlines = np.zeros((num_streamlines, num_points, 3))
        temp_streamlines[:streamlines.shape[0], :streamlines.shape[1],
                         :streamlines.shape[2]] = streamlines
        streamlines = np.float32(temp_streamlines)

    # Get middle coordinates and convert streamlines to relative format
    mids = [sl[len(sl) // 2].copy() for sl in streamlines]

    for i in range(len(streamlines)):
        streamlines[i] -= mids[i]

    # Sort mids and streamlines by mid points x, then y, then z
    streamlines = list(streamlines)
    streamlines = [
        x for _, x in sorted(
            zip(mids, streamlines),
            key=lambda pair: [pair[0][0], pair[0][1], pair[0][2]])
    ]
    mids = sorted(mids, key=lambda k: [k[0], k[1], k[2]])

    # automatically converts list to numpy array and reshapes it
    streamlines = np.reshape(streamlines,
                             (int(num_streamlines**(1 / 2)),
                              int(num_streamlines**(1 / 2)), num_points * 3))
    tractogram = torch.from_numpy(streamlines)
    tractogram = tractogram.permute(2, 0, 1)  # channels first for pytorch

    # automatically converts list to numpy array and reshapes it
    #print('Convert to torch...')
    mids = np.reshape(
        mids,
        (int(num_streamlines**(1 / 2)), int(num_streamlines**(1 / 2)), 3))
    mids = torch.from_numpy(mids)
    mids = mids.permute(2, 0, 1)

    return [tom, [mids, tractogram]]
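
The get_data variants here and in Examples #10, #38 and #41 assume module-level imports and constants that the listing omits. A hedged sketch with hypothetical values:

# Plausible imports/globals assumed by get_data (values are assumptions):
import numpy as np
import torch
import nibabel as nib
from nibabel import trackvis
from dipy.tracking.streamline import set_number_of_points

num_points = 100        # assumed: points per resampled streamline
num_streamlines = 1024  # assumed: must be a perfect square for the reshapes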
Example #31
def load(T_filename, threshold_short_streamlines=10.0):
    """Load tractogram from TRK file and remove short streamlines with
    length below threshold.
    """
    print("Loading %s" % T_filename)
    T, hdr = trackvis.read(T_filename, as_generator=False)
    T = np.array([s[0] for s in T], dtype=object)
    print("%s: %s streamlines" % (T_filename, len(T)))

    # Removing short artifactual streamlines
    print("Removing (presumably artifactual) streamlines shorter than %s" % threshold_short_streamlines)
    T = np.array([s for s in T if length(s) >= threshold_short_streamlines], dtype=np.object)
    print("%s: %s streamlines" % (T_filename, len(T)))
    return T, hdr
Example #32
def load_streamlines_v2(fn, ref):
    streams, header = trackvis.read(fn)

    data, ref_affine = load_nifti(ref)

    transformed = []
    for sl in streams:
        result = utils.apply_affine(aff=ref_affine, pts=sl[0])
        transformed.append(result)

    transformed = np.array(transformed)
    original = np.array([sl[0] for sl in streams])

    return [original, transformed]
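
The utils.apply_affine(aff=..., pts=...) call matches nibabel's affine helper; an equivalent sketch using nibabel.affines directly:

from nibabel.affines import apply_affine

transformed = [apply_affine(ref_affine, sl[0]) for sl in streams]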
Example #33
def load_trk(file):
    """
    load the streamlines data (.trk)
    Parameters
    ----------
    file: data path

    Return
    ------
    streamlines
    """
    streams, hdr = trackvis.read(file, points_space="rasmm")
    # streamlines = [s[0] for s in streams]
    return streams, hdr
Example #34
def importData(ftrk, ffa, fdwi):
    import nibabel as nib

    fa_img = nib.load(ffa)
    fa = fa_img.get_data()
    affine = fa_img.get_affine()

    img = nib.load(fdwi)
    data = img.get_data()

    from nibabel import trackvis
    streams, hdr = trackvis.read(ftrk)
    streamlines = [s[0] for s in streams]

    return (fa, affine, data, streamlines)
Example #35
def importData(ftrk, ffa, fdwi):
    import nibabel as nib

    fa_img = nib.load(ffa)
    fa = fa_img.get_data()
    affine = fa_img.get_affine()

    img = nib.load(fdwi)
    data = img.get_data()

    from nibabel import trackvis
    streams, hdr = trackvis.read(ftrk)
    streamlines = [s[0] for s in streams]

    return (fa, affine, data, streamlines)
Example #36
def gen_dens(tracts, ref_vol, out_dens):
    print('reading sta streamlines')
    streams, hdr = trackvis.read(tracts)
    streamlines = [s[0] for s in streams]

    print('reading reference volume')
    niihdr = nib.load(ref_vol)
    nii = niihdr.get_data()

    print('generating density map from tracts')
    dm = utils.density_map(streamlines, affine=niihdr.affine, vol_dims=nii.shape)
    dm_img = nib.Nifti1Image(dm.astype("uint16"), niihdr.affine)

    print('saving tracts map')
    dm_img.to_filename(out_dens)
Example #37
def load_tracks(method="pmt"):
    from nibabel import trackvis as tv

    dname = "/home/eg309/Data/orbital_phantoms/dwi_dir/subject1/"

    if method == "pmt":
        fname = "/home/eg309/Data/orbital_phantoms/dwi_dir/workflow/tractography/_subject_id_subject1/cam2trk_pico_twoten/data_fit_pdfs_tracked.trk"
        streams, hdr = tv.read(fname, points_space="voxel")
        tracks = [s[0] for s in streams]
    if method == "dti":
        fname = dname + "dti_tracks.dpy"
    if method == "dsi":
        fname = dname + "dsi_tracks.dpy"
    if method == "gqs":
        fname = dname + "gqi_tracks.dpy"
    if method == "eit":
        fname = dname + "eit_tracks.dpy"
    if method in ["dti", "dsi", "gqs", "eit"]:
        dpr_linear = Dpy(fname, "r")
        tracks = dpr_linear.read_tracks()
        dpr_linear.close()

    if method != "pmt":
        tracks = [t - np.array([96 / 2.0, 96 / 2.0, 55 / 2.0]) for t in tracks if track_range(t, 100 / 2.5, 150 / 2.5)]
    tracks = [t for t in tracks if track_range(t, 100 / 2.5, 150 / 2.5)]

    print "final no of tracks ", len(tracks)
    qb = QuickBundles(tracks, 25.0 / 2.5, 18)
    # from dipy.viz import fvtk
    # r=fvtk.ren()
    # fvtk.add(r,fvtk.line(qb.virtuals(),fvtk.red))
    # fvtk.show(r)
    # show_tracks(tracks)#qb.exemplars()[0])
    # qb.remove_small_clusters(40)
    del tracks
    # load
    tl = TrackLabeler(qb, qb.downsampled_tracks(), vol_shape=None,
                      tracks_line_width=3.0, tracks_alpha=1)

    # return tracks
    w = World()
    w.add(tl)
    # create window
    wi = Window(caption="Fos", bgcolor=(1.0, 1.0, 1.0, 1.0), width=1600, height=900)
    wi.attach(w)
    # create window manager
    wm = WindowManager()
    wm.add(wi)
    wm.run()
Example #38
def get_data(in_fn, out_fn, mean, sdev):
    # Load TOM volume and preprocess
    tom = nib.load(in_fn).get_data()  # 144 x 144 x 144 x 3
    tom = (tom - mean) / sdev  # normalise based on dataset mean/stdev
    tom = torch.from_numpy(np.float32(tom))
    tom = tom.permute(3, 0, 1, 2)  # channels first for pytorch

    # Load the tractogram
    streamlines, header = trackvis.read(out_fn)
    streamlines = [s[0] for s in streamlines]
    streamlines = np.array(streamlines)

    # Get seed coordinates and convert streamlines to relative format
    seeds = [sl[0].copy() for sl in streamlines]
    for i in range(len(streamlines)):
        streamlines[i] -= seeds[i]

    # Sort seeds and streamlines by seed points x, then y, then z
    streamlines = list(streamlines)
    streamlines = [
        x for _, x in sorted(
            zip(seeds, streamlines),
            key=lambda pair: [pair[0][0], pair[0][1], pair[0][2]])
    ]
    seeds = sorted(seeds, key=lambda k: [k[0], k[1], k[2]])

    # automatically converts list to numpy array and reshapes it
    # (num_sl, points_per_sl, 3) -> (sqrt(num_sl), sqrt(num_sl), points_per_sl*3)
    # Performed in 2 successive steps because I don't know if it works if I do it in one step
    streamlines = np.reshape(streamlines,
                             (int(num_streamlines**(1 / 2)),
                              int(num_streamlines**(1 / 2)), num_points, 3))
    streamlines = np.reshape(streamlines,
                             (int(num_streamlines**(1 / 2)),
                              int(num_streamlines**(1 / 2)), num_points * 3))
    tractogram = torch.from_numpy(streamlines)
    tractogram = tractogram.permute(2, 0, 1)  # channels first for pytorch

    # automatically converts list to numpy array and reshapes it
    #print('Convert to torch...')
    seeds = np.reshape(
        seeds,
        (int(num_streamlines**(1 / 2)), int(num_streamlines**(1 / 2)), 3))
    seeds = torch.from_numpy(seeds)
    seeds = seeds.permute(2, 0, 1)

    return [tom, [seeds, tractogram]]
Example #39
def load_or_create(subject, side, len_threshold=20, k=100, outdir='data_als/cache/', seed=0):
    filename = 'data_als/%d/tracks_dti_3M_linear.trk' % subject

    print("Loading", filename)
    streamlines, header = read(filename)
    streamlines = np.array(streamlines, dtype=object)[:, 0]

    # hd = md5(streamlines).hexdigest()
    # print("hexdigest:", hd)

    filename_cst = 'data_als/%d/%d_corticospinal_%s_3M.pkl'
    filename_cst = filename_cst % (subject, subject_segmentation[subject], side)
    print("Loading CST", filename_cst)
    cst_ids = np.load(filename_cst)
    # cst_streamlines = streamlines[cst_ids]

    print("Building the dissimilarity representation.")
    try:
        filename_prototypes = outdir + 'Pi_ids_%d_%s.npy' % (subject, side)
        print("Trying to load", filename_prototypes)
        Pi_ids = np.load(filename_prototypes)
        print("Done.")
    except IOError:
        print("Not found.")
        print("Creating prototypes.")
        lengths = np.array([len(s) for s in streamlines])
        # using the long-streamlines heuristic
        streamlines_long_ids = np.where(lengths > len_threshold)[0]
        distance = bundles_distances_mam
        np.random.seed(seed)
        Pi_ids = streamlines_long_ids[subset_furthest_first(
            streamlines[streamlines_long_ids], k=k, distance=distance)]
        print("Saving", filename_prototypes)
        np.save(filename_prototypes, Pi_ids)

    # Pi must be defined in both branches: loading the prototype ids from
    # disk skips the except block above.
    Pi = streamlines[Pi_ids]

    try:
        filename_dr = outdir + 'dr_%d_%s.npy' % (subject, side)
        print("Trying to load", filename_dr)
        dr = np.load(filename_dr)
        print("Done.")
    except IOError:
        print("Not found.")
        print("Computing the dissimilarity matrix.")
        dr = bundles_distances_mam(streamlines, Pi).astype(np.float32)
        print("Saving", filename_dr)
        np.save(filename_dr, dr.astype(np.float32))

    return streamlines, cst_ids, Pi_ids, dr
Example #40
    def load_lines(self, filename):
        """ Load tractogram from a tractography file, include tck, trk, vtk)

        Parameters
        ----------
        filename: str
            Pathstr to a tractography file

        Returns
        -------
        self: a Lines object
        """
        if filename.endswith(('.tck')):
            self.lines = tck.TckFile.load(filename)
        elif filename.endswith(('.trk')):
            self.lines = trackvis.read(filename, points_space="rasmm")
        else:
            print('Other formats are not supported yet.')
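
Note that trackvis.read returns a (streamlines, header) pair rather than a file object, so the two branches above store different kinds of values in self.lines. A sketch of the newer nibabel.streamlines API, which handles both formats uniformly:

import nibabel as nib

trk = nib.streamlines.load('tracks.trk')   # hypothetical path; .tck works too
streamlines = trk.streamlines              # points in RAS+ mm space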
Example #41
def get_data(in_fn, out_fn, mean, sdev):
    # Load TOM volume
    tom = nib.load(in_fn).get_data() # 144 x 144 x 144 x 3

    # Preprocess input
    tom = (tom - mean) / sdev  # normalise based on dataset mean/stdev
    do_flip_X = random.randint(0, 1) == 1
    do_flip_Y = random.randint(0, 1) == 1
    do_flip_Z = random.randint(0, 1) == 1
    if do_flip_X:
        tom = tom[::-1, :, :]
    if do_flip_Y:
        tom = tom[:, ::-1, :]
    if do_flip_Z:
        tom = tom[:, :, ::-1]
    tom = torch.from_numpy(np.float32(tom))
    tom = tom.permute(3, 0, 1, 2)  # channels first for pytorch

    # Load the tractogram
    streamlines, header = trackvis.read(out_fn)
    streamlines = [s[0] for s in streamlines]

    # Preprocess the streamlines
    streamlines = set_number_of_points(streamlines, num_points)
    streamlines = np.array(streamlines)
    if len(streamlines) < num_streamlines:
        temp_streamlines = np.zeros((num_streamlines, num_points, 3))
        temp_streamlines[:streamlines.shape[0], :streamlines.shape[1],
                         :streamlines.shape[2]] = streamlines
        streamlines = np.float32(temp_streamlines)

    # Convert to relative format
    seeds = [sl[0].copy() for sl in streamlines]
    for i in range(len(streamlines)):
        streamlines[i] -= seeds[i]

    streamlines = np.reshape(streamlines, (int(num_streamlines**(1/2)), int(num_streamlines**(1/2)), num_points*3))
    tractogram = torch.from_numpy(streamlines)
    tractogram = tractogram.permute(2, 0, 1) # channels first for pytorch

    seeds = np.reshape(seeds, (int(num_streamlines**(1/2)), int(num_streamlines**(1/2)), 3))
    seeds = torch.from_numpy(seeds)
    seeds = seeds.permute(2, 0, 1)

    return [tom, [seeds, tractogram]]
Example #42
    def _run_interface(self, runtime):
        from numpy import min_scalar_type
        from dipy.tracking.utils import density_map
        import nibabel.trackvis as nbt

        tracks, header = nbt.read(self.inputs.in_file)
        streams = ((ii[0]) for ii in tracks)

        if isdefined(self.inputs.reference):
            refnii = nb.load(self.inputs.reference)
            affine = refnii.affine
            data_dims = refnii.shape[:3]
            kwargs = dict(affine=affine)
        else:
            IFLOGGER.warning(
                "voxel_dims and data_dims are deprecated as of dipy "
                "0.7.1. Please use reference input instead")

            if not isdefined(self.inputs.data_dims):
                data_dims = header["dim"]
            else:
                data_dims = self.inputs.data_dims
            if not isdefined(self.inputs.voxel_dims):
                voxel_size = header["voxel_size"]
            else:
                voxel_size = self.inputs.voxel_dims

            affine = header["vox_to_ras"]
            kwargs = dict(voxel_size=voxel_size)

        data = density_map(streams, data_dims, **kwargs)
        data = data.astype(min_scalar_type(data.max()))
        img = nb.Nifti1Image(data, affine)
        out_file = op.abspath(self.inputs.out_filename)
        nb.save(img, out_file)

        IFLOGGER.info(
            "Track density map saved as %s, size=%s, dimensions=%s",
            out_file,
            img.shape,
            img.header.get_zooms(),
        )

        return runtime
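
The min_scalar_type call shrinks the density map to the smallest dtype able to hold its peak count; a quick illustration:

import numpy as np

np.min_scalar_type(255)    # dtype('uint8')
np.min_scalar_type(70000)  # dtype('uint32')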
Example #43
def get_streamlines_plot(path, ref_img_path, subsampling=10):
    affine = nib.load(ref_img_path).affine

    streams, hdr = trackvis.read(path)
    streamlines = [s[0] for s in streams]
    streamlines = list(move_streamlines(streamlines, np.linalg.inv(affine)))

    traces = []
    for sl in streamlines[::subsampling]:
        color = get_voxelwise_orientation_colormap([sl],
                                                   orientation="saggital")[0]
        x, y, z = zip(*sl)
        trace = go.Scatter3d(x=x,
                             y=y,
                             z=z,
                             line=dict(color=color, width=2),
                             mode="lines")
        traces.append(trace)
    return traces
Example #44
def filterlength(dname, fdwi, ffa, ftrk, thr_length, show=False):

    fa_img = nib.load(ffa)
    fa = fa_img.get_data()
    affine = fa_img.get_affine()

    img = nib.load(fdwi)
    data = img.get_data()

    from nibabel import trackvis
    streams, hdr = trackvis.read(ftrk)
    streamlines = [s[0] for s in streams]

    # threshold on streamline length

    from dipy.tracking.utils import length
    lengths = list(length(streamlines))

    new_streamlines = [
        s for s, l in zip(streamlines, lengths) if l > thr_length
    ]  #3.5

    # info length streamlines

    print(len(streamlines))
    print(len(new_streamlines))

    print(max(length(streamlines)))
    print(min(length(streamlines)))

    print(max(length(new_streamlines)))
    print(min(length(new_streamlines)))

    # show new tracto

    new_streamlines = list(new_streamlines)
    new_lengths = list(length(new_streamlines))

    fnew_tractogram = dname + 'filteredtractogram.trk'
    save_trk_old_style(fnew_tractogram, new_streamlines, affine, fa.shape)

    if show:
        show_results(data, new_streamlines, fa, affine, opacity=0.6)
Example #45
def load_PX_tracks():

    roi = "LH_premotor"

    dn = "/home/hadron/from_John_mon12thmarch"
    dname = "/extra_probtrackX_analyses/_subject_id_subj05_101_32/particle2trackvis_" + roi + "_native/"
    fname = dn + dname + "tract_samples.trk"
    from nibabel import trackvis as tv

    points_space = [None, "voxel", "rasmm"]
    streamlines, hdr = tv.read(fname, as_generator=True, points_space="voxel")
    tracks = [s[0] for s in streamlines]
    del streamlines
    # return tracks

    qb = QuickBundles(tracks, 25.0 / 2.5, 18)
    # tl=Line(qb.exemplars()[0],line_width=1)
    del tracks
    qb.remove_small_clusters(20)

    tl = TrackLabeler(qb, qb.downsampled_tracks(), vol_shape=None, tracks_line_width=3.0, tracks_alpha=1)

    # put the seeds together
    # seeds=np.vstack((seeds,seeds2))
    # shift the seeds
    # seeds=np.dot(mat[:3,:3],seeds.T).T + mat[:3,3]
    # seeds=seeds-shift
    # seeds2=np.dot(mat[:3,:3],seeds2.T).T + mat[:3,3]
    # seeds2=seeds2-shift
    # msk = Point(seeds,colors=(1,0,0,1.),pointsize=2.)
    # msk2 = Point(seeds2,colors=(1,0,.2,1.),pointsize=2.)
    w = World()
    w.add(tl)
    # w.add(msk)
    # w.add(msk2)
    # w.add(sl)
    # create window
    wi = Window(caption="Fos", bgcolor=(0.3, 0.3, 0.6, 1.0), width=1600, height=900)
    wi.attach(w)
    # create window manager
    wm = WindowManager()
    wm.add(wi)
    wm.run()
Example #46
def getPointsFromTrack(filename):
    geo = hou.pwd().geometry()
    # Read in stream data
    streams, hdr = trackvis.read(filename)

    streamlines = [s[0] for s in streams]

    # For each streamline add a curve to the geometry
    j = 0
    for stream in streamlines:
        i = 0
        curve = geo.createNURBSCurve(len(stream))
        if hou.updateProgressAndCheckForInterrupt(
                int(float(j) / float(len(streamlines)) * 100)):
            break
        for vertex in curve.vertices():
            vertex.point().setPosition((float(stream[i][0]),
                                        float(stream[i][1]),
                                        float(stream[i][2])))
            i = i + 1
            if hou.updateProgressAndCheckForInterrupt():
                break
        j = j + 1
Example #47
def bundle_tracks(in_file, dist_thr=40., pts=16, skip=80.):
    import subprocess
    import os.path as op
    from nibabel import trackvis as tv
    from dipy.segment.quickbundles import QuickBundles
    streams, hdr = tv.read(in_file)
    streamlines = [i[0] for i in streams]
    qb = QuickBundles(streamlines, float(dist_thr), int(pts))
    clusters = qb.clustering
    #scalars = [i[0] for i in streams]

    out_files = []
    name = "quickbundle_"
    n_clusters = clusters.keys()
    print("%d clusters found" % len(n_clusters))

    new_hdr = tv.empty_header()
    new_hdr['n_scalars'] = 1

    for cluster in clusters:
        cluster_trk = op.abspath(name + str(cluster) + ".trk")
        print("Writing cluster %d to %s" % (cluster, cluster_trk))
        out_files.append(cluster_trk)
        clust_idxs = clusters[cluster]['indices']
        new_streams = [streamlines[i] for i in clust_idxs]
        for_save = [(sl, None, None) for sl in new_streams]
        tv.write(cluster_trk, for_save, hdr)

    out_merged_file = "MergedBundles.trk"
    command_list = ["track_merge"]
    command_list.extend(out_files)
    command_list.append(out_merged_file)
    subprocess.call(command_list)
    out_scene_file = write_trackvis_scene(out_merged_file,
                                          n_clusters=len(clusters),
                                          skip=skip,
                                          names=None,
                                          out_file="NewScene.scene")
    print("Merged track file written to %s" % out_merged_file)
    print("Scene file written to %s" % out_scene_file)
    return out_files, out_merged_file, out_scene_file
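
dipy.segment.quickbundles was removed in later dipy releases; a sketch of the replacement clustering API, assuming a recent dipy:

from dipy.segment.clustering import QuickBundles

qb = QuickBundles(threshold=40.)
clusters = qb.cluster(streamlines)   # a ClusterMap
indices = clusters[0].indices        # streamline ids of the first bundle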
Example #48
def filterlength(dname, fdwi, ffa, ftrk, thr_length, show=False):

    fa_img = nib.load(ffa)
    fa = fa_img.get_data()
    affine = fa_img.get_affine()

    img = nib.load(fdwi)
    data = img.get_data()
    
    from nibabel import trackvis
    streams, hdr = trackvis.read(ftrk)
    streamlines = [s[0] for s in streams]
    
    # threshold on streamline length

    from dipy.tracking.utils import length
    lengths = list(length(streamlines))

    new_streamlines = [s for s, l in zip(streamlines, lengths)
                       if l > thr_length]  # 3.5
    
    # info length streamlines

    print(len(streamlines))
    print(len(new_streamlines))

    print(max(length(streamlines)))
    print(min(length(streamlines)))

    print(max(length(new_streamlines)))
    print(min(length(new_streamlines)))
    
    # show new tracto

    new_streamlines = list(new_streamlines)
    new_lengths = list(length(new_streamlines))

    fnew_tractogram = dname + 'filteredtractogram.trk'
    save_trk_old_style(fnew_tractogram, new_streamlines, affine, fa.shape)

    if show:
        show_results(data, new_streamlines, fa, affine, opacity=0.6)
Example #49
def compute_length_array(trkfile=None, streams=None, savefname='lengths.npy'):
    if streams is None and trkfile is not None:
        print("Compute length array for fibers in %s" % trkfile)
        streams, hdr = tv.read(trkfile, as_generator=True)
        n_fibers = hdr['n_count']
        if n_fibers == 0:
            msg = ("Header field n_count of trackfile %s is set to 0. "
                   "No tracks seem to exist in this file." % trkfile)
            print(msg)
            raise Exception(msg)
    else:
        n_fibers = len(streams)

    leng = np.zeros(n_fibers, dtype=float)
    for i, fib in enumerate(streams):
        leng[i] = length(fib[0])

    # store length array
    np.save(savefname, leng)
    print("Stored lengths array to: %s" % savefname)

    return leng
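
compute_length_array relies on a length() helper and the legacy trackvis reader; a sketch of the plausible imports (the dipy module path is an assumption):

import numpy as np
import nibabel.trackvis as tv
from dipy.tracking.streamline import length   # assumed origin of length()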
Example #50
    def _run_interface(self, runtime):
        from numpy import min_scalar_type
        tracks, header = nbt.read(self.inputs.in_file)
        streams = ((ii[0]) for ii in tracks)

        if isdefined(self.inputs.reference):
            refnii = nb.load(self.inputs.reference)
            affine = refnii.get_affine()
            data_dims = refnii.get_shape()[:3]
            kwargs = dict(affine=affine)
        else:
            iflogger.warn(('voxel_dims and data_dims are deprecated'
                           'as of dipy 0.7.1. Please use reference '
                           'input instead'))

            if not isdefined(self.inputs.data_dims):
                data_dims = header['dim']
            else:
                data_dims = self.inputs.data_dims
            if not isdefined(self.inputs.voxel_dims):
                voxel_size = header['voxel_size']
            else:
                voxel_size = self.inputs.voxel_dims

            affine = header['vox_to_ras']
            kwargs = dict(voxel_size=voxel_size)

        data = density_map(streams, data_dims, **kwargs)
        data = data.astype(min_scalar_type(data.max()))
        img = nb.Nifti1Image(data, affine)
        out_file = op.abspath(self.inputs.out_filename)
        nb.save(img, out_file)

        iflogger.info(
            ('Track density map saved as {i}, size={d}, '
             'dimensions={v}').format(
                i=out_file,
                d=img.get_shape(),
                v=img.get_header().get_zooms()))
        return runtime
Example #51
    def _run_interface(self, runtime):

        tracts, hdr = tv.read(self.inputs.trackvis_file, as_generator=True)

        self.stat_files_data = []
        self.max_maps_data = []
        self.mean_maps_data = []
        for stat_file in self.inputs.stat_files:
            fmri_nii = nb.load(stat_file)
            self.stat_files_data.append(fmri_nii.get_data())

            shape = fmri_nii.get_header().get_data_shape()
            self.max_maps_data.append(np.zeros(shape))
            self.mean_maps_data.append(np.zeros(shape))

        hdr = hdr.copy()
        if isdefined(self.inputs.stat_labels) and \
                len(self.inputs.stat_labels) == len(self.inputs.stat_files):
            for i, label in enumerate(self.inputs.stat_labels):
                hdr['property_name'][i] = ('max_%s' % label)[0:19]
                #hdr['property_name'][1+i*2] = 'stat_mean_%s'%label
        else:
            for i in range(len(self.inputs.stat_files)):
                hdr['property_name'][i] = ('max%d' % i)[0:19]
                #hdr['property_name'][1+i*2] = 'stat_mean%d'%i

        tv.write(self.inputs.out_tracks,
                 self._gen_annotate_tracts(tracts, hdr), hdr)

        if isdefined(self.inputs.stat_labels) and \
                len(self.inputs.stat_labels) == len(self.inputs.stat_files):
            for i, label in enumerate(self.inputs.stat_labels):
                nb.save(nb.Nifti1Image(self.max_maps_data[i],
                                       fmri_nii.get_affine(),
                                       fmri_nii.get_header()),
                        self.inputs.out_max_map_prefix + "_%s" % label + '.nii')
                nb.save(nb.Nifti1Image(self.mean_maps_data[i],
                                       fmri_nii.get_affine(),
                                       fmri_nii.get_header()),
                        self.inputs.out_mean_map_prefix + "_%s" % label + '.nii')
        else:
            for i in range(len(self.inputs.stat_files)):
                nb.save(nb.Nifti1Image(self.max_maps_data[i],
                                       fmri_nii.get_affine(),
                                       fmri_nii.get_header()),
                        self.inputs.out_max_map_prefix + str(i) + '.nii')
                nb.save(nb.Nifti1Image(self.mean_maps_data[i],
                                       fmri_nii.get_affine(),
                                       fmri_nii.get_header()),
                        self.inputs.out_mean_map_prefix + str(i) + '.nii')

        del self.mean_maps_data
        del self.max_maps_data
        del self.stat_files_data

        return runtime
Example #52
    def _run_interface(self, runtime):
        from numpy import min_scalar_type
        from dipy.tracking.utils import density_map

        tracks, header = nbt.read(self.inputs.in_file)
        streams = ((ii[0]) for ii in tracks)

        if isdefined(self.inputs.reference):
            refnii = nb.load(self.inputs.reference)
            affine = refnii.affine
            data_dims = refnii.shape[:3]
            kwargs = dict(affine=affine)
        else:
            IFLOGGER.warn(
                'voxel_dims and data_dims are deprecated as of dipy 0.7.1. Please use reference '
                'input instead')

            if not isdefined(self.inputs.data_dims):
                data_dims = header['dim']
            else:
                data_dims = self.inputs.data_dims
            if not isdefined(self.inputs.voxel_dims):
                voxel_size = header['voxel_size']
            else:
                voxel_size = self.inputs.voxel_dims

            affine = header['vox_to_ras']
            kwargs = dict(voxel_size=voxel_size)

        data = density_map(streams, data_dims, **kwargs)
        data = data.astype(min_scalar_type(data.max()))
        img = nb.Nifti1Image(data, affine)
        out_file = op.abspath(self.inputs.out_filename)
        nb.save(img, out_file)

        IFLOGGER.info(
            'Track density map saved as %s, size=%s, dimensions=%s',
            out_file, img.shape, img.header.get_zooms())

        return runtime
Example No. 53
def read_trk(fname):
    """
    Read from a .trk file, return streamlines and header

    Parameters
    ----------
    fname : str
        Full path to a trk file containing the streamlines to read.

    Returns
    -------
    list : list of streamlines (3D coordinates)

    Notes
    -----
    We assume that all streamlines are provided with the "rasmm" points_space:
    that is, they have been transformed to the space reported by the affine
    associated with the image they came from, and saved with this affine
    (e.g., using `write_trk`).

    """
    streams, hdr = trackvis.read(fname, points_space="rasmm")
    return [s[0] for s in streams]
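The docstring above refers to a write_trk counterpart that is not shown in this listing. A minimal sketch of what it might look like, assuming the same nibabel.trackvis API (the helper and its exact signature are hypothetical):

import numpy as np
from nibabel import trackvis

def write_trk(fname, streamlines, affine):
    """Hypothetical counterpart to `read_trk` (a sketch, not dipy's API).

    Streamlines are assumed to already be in "rasmm" coordinates of the
    image whose voxel-to-rasmm mapping is `affine`.
    """
    hdr = trackvis.empty_header()
    trackvis.aff_to_hdr(affine, hdr, pos_vox=True, set_order=True)
    streams = [(np.asarray(sl), None, None) for sl in streamlines]
    trackvis.write(fname, streams, hdr, points_space="rasmm")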
Example No. 54
def bundle_tracks(in_file, dist_thr=40., pts=16, skip=80.):
    import subprocess
    import os.path as op
    from nibabel import trackvis as tv
    from dipy.segment.quickbundles import QuickBundles

    streams, hdr = tv.read(in_file)
    streamlines = [i[0] for i in streams]
    qb = QuickBundles(streamlines, float(dist_thr), int(pts))
    clusters = qb.clustering

    out_files = []
    name = "quickbundle_"
    cluster_ids = clusters.keys()
    print("%d clusters found" % len(cluster_ids))

    for cluster in clusters:
        cluster_trk = op.abspath(name + str(cluster) + ".trk")
        print("Writing cluster %d to %s" % (cluster, cluster_trk))
        out_files.append(cluster_trk)
        clust_idxs = clusters[cluster]['indices']
        new_streams = [streamlines[i] for i in clust_idxs]
        for_save = [(sl, None, None) for sl in new_streams]
        tv.write(cluster_trk, for_save, hdr)

    out_merged_file = "MergedBundles.trk"
    command_list = ["track_merge"]
    command_list.extend(out_files)
    command_list.append(out_merged_file)
    subprocess.call(command_list)
    out_scene_file = write_trackvis_scene(out_merged_file,
                                          n_clusters=len(clusters), skip=skip,
                                          names=None, out_file="NewScene.scene")
    print("Merged track file written to %s" % out_merged_file)
    print("Scene file written to %s" % out_scene_file)
    return out_files, out_merged_file, out_scene_file
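dipy.segment.quickbundles is the old QuickBundles API; it was later replaced by dipy.segment.clustering. Under the newer API the clustering step above might look like this sketch (the newer class does not resample internally, hence the explicit set_number_of_points):

from dipy.segment.clustering import QuickBundles
from dipy.tracking.streamline import set_number_of_points

# `streamlines` as read in bundle_tracks above
resampled = [set_number_of_points(s, 16) for s in streamlines]
qb = QuickBundles(threshold=40.)
clusters = qb.cluster(resampled)
for cluster in clusters:
    # cluster.indices maps back into the original streamline list
    new_streams = [streamlines[i] for i in cluster.indices]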
Example No. 55
def filter_fibers(applied_spline=False):

    log.info("Cut Fiber Filtering")
    log.info("===================")
    log.info("Was spline filtering applied? %s" % applied_spline)

    if applied_spline:
        intrk = op.join(gconf.get_cmp_fibers(), 'streamline_splinefiltered.trk')
    else:
        intrk = op.join(gconf.get_cmp_fibers(), 'streamline.trk')

    log.info("Input file for fiber cutting is: %s" % intrk)

    outtrk = op.join(gconf.get_cmp_fibers(), 'streamline_cutfiltered.trk')

    # compute the length of each fiber
    le = compute_length_array(intrk, savefname='lengths_beforecutfiltered.npy')

    # keep only the fibers whose length lies between the two cutoffs
    reducedidx = np.where((le > gconf.fiber_cutoff_lower) &
                          (le < gconf.fiber_cutoff_upper))[0]

    # load the track file (downside: needs everything in memory)
    fibold, hdrold = tv.read(intrk)

    # rewrite the TrackVis file with the reduced number of fibers
    outstreams = [fibold[i] for i in reducedidx]

    n_fib_out = len(outstreams)
    hdrnew = hdrold.copy()
    hdrnew['n_count'] = n_fib_out

    log.info("Compute length array for the cut fibers")
    le = compute_length_array(streams=outstreams)
    log.info("Write out file: %s" % outtrk)
    tv.write(outtrk, outstreams, hdrnew)
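compute_length_array is defined elsewhere in the package. A hypothetical sketch of the helper, assuming dipy's length metric and the two call styles used above:

import numpy as np
from nibabel import trackvis as tv
from dipy.tracking.metrics import length

def compute_length_array(trkfile=None, streams=None, savefname=None):
    # Hypothetical sketch -- the real helper is not shown in this excerpt.
    # Accepts either a .trk path or already-loaded (points, scalars, props)
    # tuples, returns one length per fiber, optionally saving to disk.
    if streams is None:
        streams, _ = tv.read(trkfile)
    le = np.array([length(s[0]) for s in streams], dtype=np.float32)
    if savefname is not None:
        np.save(savefname, le)
    return le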
Example No. 56
def filter_fibers(intrk, outtrk='', fiber_cutoff_lower=20, fiber_cutoff_upper=500):

    print("Cut Fiber Filtering")
    print("===================")

    print("Input file for fiber cutting is: %s" % intrk)

    if outtrk == '':
        path, filename = os.path.split(intrk)
        base, ext = os.path.splitext(filename)
        outtrk = os.path.abspath(base + '_cutfiltered' + ext)

    # compute the length of each fiber
    le = compute_length_array(intrk)

    # keep only the fibers whose length lies between the two cutoffs
    reducedidx = np.where((le > fiber_cutoff_lower) & (le < fiber_cutoff_upper))[0]

    # load the track file (downside: needs everything in memory)
    fibold, hdrold = tv.read(intrk)

    # rewrite the TrackVis file with the reduced number of fibers
    outstreams = [fibold[i] for i in reducedidx]

    n_fib_out = len(outstreams)
    hdrnew = hdrold.copy()
    hdrnew['n_count'] = n_fib_out

    print("Write out file: %s" % outtrk)
    print("Number of fibers out: %d" % hdrnew['n_count'])
    tv.write(outtrk, outstreams, hdrnew)
    print("File written: %s" % os.path.exists(outtrk))
Example No. 57
def fornix_streamlines(no_pts=12):
    fname = get_data('fornix')
    streams, hdr = tv.read(fname)
    streamlines = [set_number_of_points(i[0], no_pts) for i in streams]
    return streamlines
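A quick usage check (assumes dipy and its bundled test data are available):

bundle = fornix_streamlines(no_pts=12)
print(len(bundle), bundle[0].shape)  # every streamline now has shape (12, 3)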
Example No. 58
from dipy.io import pickles as pkl
from dipy.viz import fvtk
from dipy.tracking import metrics as tm
from nibabel import trackvis as tv


from dipy.data import get_data

fname = get_data('fornix')
print(fname)

"""
Load Trackvis file for *Fornix*:
"""

streams, hdr = tv.read(fname)

"""
Copy tracks:
"""

T = [i[0] for i in streams]

#T=T[:1000]

"""
Downsample tracks to just 3 points:
"""

tracks = [tm.downsample(t, 3) for t in T]
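tm.downsample was deprecated in later dipy releases in favor of set_number_of_points; the equivalent call would be:

from dipy.tracking.streamline import set_number_of_points
tracks = [set_number_of_points(t, 3) for t in T]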
Example No. 59
File: fetcher.py  Project: nipy/dipy
def read_bundles_2_subjects(subj_id="subj_1", metrics=["fa"], bundles=["af.left", "cst.right", "cc_1"]):
    r""" Read images and streamlines from 2 subjects of the SNAIL dataset

    Parameters
    ----------
    subj_id : string
        Either ``subj_1`` or ``subj_2``.
    metrics : list
        Either ['fa'] or ['t1'] or ['fa', 't1']
    bundles : list
        Example ['af.left', 'cst.right', 'cc_1']. See all the available bundles
        in the ``exp_bundles_maps/bundles_2_subjects`` directory of your
        ``$HOME/.dipy`` folder.

    Returns
    -------
    dix : dict
        Dictionary with data of the metrics and the bundles as keys.

    Notes
    -----
    If you are using these datasets please cite the following publications.

    References
    ----------

    .. [1] Renauld, E., M. Descoteaux, M. Bernier, E. Garyfallidis,
       K. Whittingstall, "Morphology of thalamus, LGN and optic radiation do
       not influence EEG alpha waves", PLoS One (under submission), 2015.

    .. [2] Garyfallidis, E., O. Ocegueda, D. Wassermann, M. Descoteaux,
       "Robust and efficient linear registration of fascicles in the space
       of streamlines", NeuroImage, 117:124-140, 2015.

    """

    dname = pjoin(dipy_home, "exp_bundles_and_maps", "bundles_2_subjects")

    from nibabel import trackvis as tv

    res = {}

    if "t1" in metrics:
        img = nib.load(pjoin(dname, subj_id, "t1_warped.nii.gz"))
        data = img.get_data()
        affine = img.affine
        res["t1"] = data

    if "fa" in metrics:
        img_fa = nib.load(pjoin(dname, subj_id, "fa_1x1x1.nii.gz"))
        fa = img_fa.get_data()
        affine = img_fa.affine
        res["fa"] = fa

    res["affine"] = affine

    for bun in bundles:

        streams, hdr = tv.read(pjoin(dname, subj_id, "bundles",
                                     "bundles_" + bun + ".trk"),
                               points_space="rasmm")
        streamlines = [s[0] for s in streams]
        res[bun] = streamlines

    return res
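A usage sketch: the dataset must be fetched once before reading (both helpers live in dipy.data):

from dipy.data import fetch_bundles_2_subjects, read_bundles_2_subjects

fetch_bundles_2_subjects()  # downloads into $HOME/.dipy on first run
res = read_bundles_2_subjects(subj_id="subj_1", metrics=["fa"],
                              bundles=["af.left"])
fa, affine, af_left = res["fa"], res["affine"], res["af.left"]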