def _create_trackvis_header(self, nii_hdr):
    hdr = tv.empty_header()
    hdr['dim'] = np.array(nii_hdr.get_data_shape())
    hdr['voxel_size'] = np.array(nii_hdr.get_zooms())
    # Build a diagonal voxel-to-RAS affine scaled by the voxel sizes
    aff = np.eye(4)
    aff[0:3, 0:3] *= np.array(nii_hdr.get_zooms())
    hdr['vox_to_ras'] = aff
    return hdr
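For context, a hedged sketch of how a header built this way feeds `nibabel.trackvis.write`; the file name and the single streamline are illustrative assumptions, not from the original source:

import numpy as np
import nibabel as nb
from nibabel import trackvis as tv

nii_hdr = nb.load('dwi.nii').header            # assumed input volume
hdr = tv.empty_header()
hdr['dim'] = np.array(nii_hdr.get_data_shape())
hdr['voxel_size'] = np.array(nii_hdr.get_zooms())
aff = np.eye(4)
aff[0:3, 0:3] *= np.array(nii_hdr.get_zooms())
hdr['vox_to_ras'] = aff

# One made-up streamline in trackvis voxel-mm coordinates
sl = np.array([[0., 0., 0.], [1., 1., 1.], [2., 2., 2.]], dtype='float32')
tv.write('example.trk', [(sl, None, None)], hdr)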
Example #3
def save_streamlines(self, streamlines, save_streamlines_to):
    trk_hdr = empty_header()
    voxel_order = orientation_to_string(nib.io_orientation(self.affine))
    trk_hdr['voxel_order'] = voxel_order
    trk_hdr['voxel_size'] = self.voxel_size
    trk_hdr['vox_to_ras'] = self.affine
    trk_hdr['dim'] = self.shape
    # trackvis expects one (points, scalars, properties) triple per streamline
    trk_tracks = ((ii, None, None) for ii in streamlines)
    write(save_streamlines_to, trk_tracks, trk_hdr)
    # Also pickle the parent object next to the .trk file
    pickle.dump(self, open(save_streamlines_to + '.p', 'wb'))
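A hedged, standalone sketch of the same write path; the affine, voxel sizes, dimensions, and streamline are made up, and `''.join(nib.aff2axcodes(...))` stands in for the `orientation_to_string(nib.io_orientation(...))` pairing used above:

import numpy as np
import nibabel as nib
from nibabel.trackvis import empty_header, write

affine = np.diag([2., 2., 2., 1.])
streamlines = [np.array([[0., 0., 0.], [2., 2., 2.]], dtype='float32')]

trk_hdr = empty_header()
trk_hdr['voxel_order'] = ''.join(nib.aff2axcodes(affine))   # e.g. 'RAS'
trk_hdr['voxel_size'] = (2., 2., 2.)
trk_hdr['vox_to_ras'] = affine
trk_hdr['dim'] = (91, 109, 91)
write('bundle.trk', ((sl, None, None) for sl in streamlines), trk_hdr)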
Example #5
def bundle_tracks(in_file, dist_thr=40., pts=16, skip=80.):
    import subprocess
    import os.path as op
    from nibabel import trackvis as tv
    from dipy.segment.quickbundles import QuickBundles
    streams, hdr = tv.read(in_file)
    streamlines = [i[0] for i in streams]
    qb = QuickBundles(streamlines, float(dist_thr), int(pts))
    clusters = qb.clustering
    print("%d clusters found" % len(clusters))

    out_files = []
    name = "quickbundle_"

    new_hdr = tv.empty_header()
    new_hdr['n_scalars'] = 1  # note: unused below; clusters are written with the input hdr

    # Write each cluster to its own .trk file
    for cluster in clusters:
        cluster_trk = op.abspath(name + str(cluster) + ".trk")
        print("Writing cluster %d to %s" % (cluster, cluster_trk))
        out_files.append(cluster_trk)
        clust_idxs = clusters[cluster]['indices']
        new_streams = [streamlines[i] for i in clust_idxs]
        for_save = [(sl, None, None) for sl in new_streams]
        tv.write(cluster_trk, for_save, hdr)

    # Merge the per-cluster files with Diffusion Toolkit's track_merge CLI
    out_merged_file = "MergedBundles.trk"
    command_list = ["track_merge"]
    command_list.extend(out_files)
    command_list.append(out_merged_file)
    subprocess.call(command_list)
    out_scene_file = write_trackvis_scene(out_merged_file,
                                          n_clusters=len(clusters),
                                          skip=skip,
                                          names=None,
                                          out_file="NewScene.scene")
    print("Merged track file written to %s" % out_merged_file)
    print("Scene file written to %s" % out_scene_file)
    return out_files, out_merged_file, out_scene_file
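A hedged invocation sketch; the input file name is a placeholder, and the function assumes Diffusion Toolkit's `track_merge` executable on the PATH plus a `write_trackvis_scene` helper defined in the same module:

out_files, merged_trk, scene = bundle_tracks('whole_brain.trk', dist_thr=20., pts=18)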
Example #7
    def __init__(self,
                 fname="",
                 tracks=None,
                 header=None,
                 connections=np.array([]),
                 original_track_indices=np.array([]),
                 properties=None,
                 **traits):
        """Convenience class for loading, manipulating and storing
        trackvis data.

        Parameters
        ----------
        fname : str
          Path to the trackvis .trk file.
        tracks : list
          Use this list of tracks instead of loading from `fname`.
        header : dict
          Trackvis header.
        connections : np.ndarray[dtype=int, ndim=1]
          Array with a connection id for each track.
        storage_coords : {"ijk", "voxmm", "MNI", "qsdr"}
          How this object stores the tracks internally. Assumes the
          original coordinates are in voxmm, as for trackvis.
        properties : traited_query.Scan object
          Information about this dataset.
        """
        if header is not None:
            # Tracks and header are provided directly
            self.header = header
            self.set_tracks(tracks)
        else:
            if fname.endswith("trk.gz") or fname.endswith("trk"):
                # .trk files are binary, so open them in "rb" mode
                if fname.endswith("trk.gz"):
                    fl = gzip.open(fname, "rb")
                else:
                    fl = open(fname, "rb")
                streams, self.header = trackvis.read(fl)
                self.set_tracks(
                    np.array([stream[0] for stream in streams],
                             dtype=object))
                fl.close()
                # Check for scalars, support them someday
                if self.header['n_scalars'] > 0:
                    print "WARNING: Ignoring track scalars in %s" % fname
            elif fname.endswith("txt"):
                # One track per line: whitespace-separated x y z triples
                fop = open(fname, "r")
                self.set_tracks(np.array(
                    [np.array(map(float, line.strip().split())).reshape(-1, 3)
                     for line in fop], dtype=object))
            elif fname.endswith("mat"):
                pass

        if not hasattr(self, "header"):
            self.header = trackvis.empty_header()
        if properties is None:
            from dsi2.database.traited_query import Scan
            print "Warning: using default properties"
            self.properties = Scan()
        else:
            self.properties = properties

        self.connections = connections
        self.clusters = []
        self.original_track_indices = original_track_indices
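A hedged instantiation sketch. The enclosing class is not shown in the excerpt; in the dsi2 package this constructor belongs to `TrackDataset`, and the attribute access below assumes `set_tracks` stores to `self.tracks`:

tds = TrackDataset(fname="subject1.trk.gz")   # load tracks and header from disk
print "loaded %d tracks" % len(tds.tracks)    # Python 2, matching the snippet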
Example #8
from traitsui.api import \
    RangeEditor, TableEditor, Handler, Include, HSplit, EnumEditor, Action, \
    CheckListEditor, ObjectColumn

from traits.api import HasTraits, Instance, Array, \
    CInt, Color, Bool, List, Int, Str, Any, Enum, \
    DelegatesTo, on_trait_change, Button
import cPickle as pickle
import numpy as np                # needed by the header constants below
from nibabel import trackvis     # needed by the header constants below
from mayavi.core.ui.api import SceneEditor
from mayavi.tools.mlab_scene_model import MlabSceneModel
from mayavi import mlab
import gzip

from .track_math import tracks_to_endpoints
from mayavi.core.api import PipelineBase, Source

mni_hdr = trackvis.empty_header()
mni_hdr['dim'] = np.array([91, 109, 91], dtype="int16")
mni_hdr['voxel_order'] = 'LAS'
mni_hdr['voxel_size'] = np.array([2., 2., 2.], dtype='float32')
mni_hdr['image_orientation_patient'] = np.array([1., 0., 0., 0., -1., 0.],
                                                dtype='float32')
mni_hdr['vox_to_ras'] = \
           np.array([[ -2.,  0.,  0.,   90.],
                     [  0.,  2.,  0., -126.],
                     [  0.,  0.,  2.,  -72.],
                     [  0.,  0.,  0.,    1.]], dtype='float32')

qsdr_hdr = trackvis.empty_header(version=1)
qsdr_hdr['dim'] = np.array([79, 95, 69], dtype="int16")
qsdr_hdr['voxel_order'] = 'LPS'
qsdr_hdr['voxel_size'] = np.array([2., 2., 2.], dtype='float32')
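A hedged sketch of putting the hand-built MNI header to use; the streamline coordinates and output name are invented for illustration:

# With points_space='rasmm', nibabel converts RAS-mm points into the
# trackvis voxel-mm space described by mni_hdr['vox_to_ras'].
sl_mni = np.array([[0., 0., 0.], [10., 10., 10.]], dtype='float32')
trackvis.write('mni_example.trk', [(sl_mni, None, None)], mni_hdr,
               points_space='rasmm')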
Example #9
import numpy
from warnings import warn
from nibabel import trackvis


def tractography_to_trackvis_file(filename,
                                  tractography,
                                  affine=None,
                                  image_dimensions=None):
    trk_header = trackvis.empty_header()

    if affine is not None:
        pass
    elif hasattr(tractography, 'affine'):
        affine = tractography.affine
    else:
        raise ValueError("Affine transform has to be provided")

    trackvis.aff_to_hdr(affine, trk_header, True, True)
    trk_header['origin'] = 0.
    if image_dimensions is not None:
        trk_header['dim'] = image_dimensions
    elif hasattr(tractography, 'image_dimensions'):
        trk_header['dim'] = tractography.image_dimensions
    else:
        raise ValueError("Image dimensions needed to save a trackvis file")

    orig_data = tractography.tracts_data()
    data = {}
    for k, v in orig_data.items():
        if not isinstance(v[0], numpy.ndarray):
            continue
        if (v[0].ndim > 1 and any(d > 1 for d in v[0].shape[1:])):
            warn("Scalar data %s ignored as trackvis "
                 "format does not handle multivalued data" % k)
        else:
            data[k] = v

    trk_header['n_count'] = len(tractography.tracts())
    trk_header['n_properties'] = 0
    trk_header['n_scalars'] = len(data)

    if len(data) > 10:
        raise ValueError('At most 10 scalars permitted per point')

    trk_header['scalar_name'][:len(data)] = numpy.array([n[:20] for n in data],
                                                        dtype='|S20')
    trk_tracts = []

    for i, sl in enumerate(tractography.tracts()):
        scalars = None
        if len(data) > 0:
            scalars = numpy.vstack([
                data[k.decode('utf8')][i].squeeze()
                for k in trk_header['scalar_name'][:len(data)]
            ]).T

        trk_tracts.append((sl, scalars, None))

    trackvis.write(filename, trk_tracts, trk_header, points_space='rasmm')
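A hedged invocation; `tract` is a placeholder for any object exposing the interface the function uses (`tracts()`, `tracts_data()`, and optionally `affine` / `image_dimensions`), for example a tract_querier `Tractography` instance, and the path and dimensions are made up:

tractography_to_trackvis_file('out.trk', tract,
                              affine=numpy.eye(4),
                              image_dimensions=(91, 109, 91))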
Example #10
def hdr(self):
    # Return the explicitly-set header if there is one, else a blank default
    if self._hdr is not None:
        return self._hdr
    return trackvis.empty_header()
Example #14
'''
Created on 1 Apr 2011

@author: filo
'''
import nibabel.trackvis as tv
import nibabel as nb
import numpy as np
import cPickle

pkl_file = open('streamlines.pkl', 'rb')

streamlines = cPickle.load(pkl_file)
print type(streamlines)

hdr = tv.empty_header()
nifti_filename = 'dtifit__FA.nii'
nii_hdr = nb.load(nifti_filename).get_header()
hdr['dim'] = np.array(nii_hdr.get_data_shape())
hdr['voxel_size'] = np.array(nii_hdr.get_zooms())
aff = np.eye(4)
aff[0:3, 0:3] *= np.array(nii_hdr.get_zooms())
hdr['vox_to_ras'] = aff

print hdr['version']

for i in range(len(streamlines)):

    points_arr = streamlines[i][0]

    #invert y
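    # The excerpt breaks off here. A hedged, plausible completion of the
    # loop: flip y in voxel-mm space and write the result. The flip formula
    # and the output file name are assumptions, not original code.
    points_arr[:, 1] = hdr['dim'][1] * hdr['voxel_size'][1] - points_arr[:, 1]
    streamlines[i] = (points_arr,) + tuple(streamlines[i][1:])

tv.write('streamlines_flipped.trk', streamlines, hdr)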
Example #15
# Assumed imports: the excerpt starts mid-script, and these match the old
# dipy/nibabel APIs used below ('streamlines' itself is defined further up
# in the original source).
import numpy as np
import nibabel as nib
from nibabel.trackvis import empty_header, write
from dipy.tracking.utils import density_map
from dipy.tracking.vox2track import track_counts

# Treat these streamlines as if they are in trackvis format and generate counts
counts_trackvis = density_map(streamlines, (4, 4, 5), (1, 1, 1))

# Treat these streamlines as if they are in nifti format and generate counts
counts_nifti = track_counts(streamlines, (4, 4, 5), (1, 1, 1),
                            return_elements=False)

print("saving trk files and track_count volumes")
aff = np.eye(4)
aff[0, 0] = -1
img = nib.Nifti1Image(counts_trackvis.astype('int16'), aff)
nib.save(img, 'counts_trackvis.nii.gz')
img = nib.Nifti1Image(counts_nifti.astype('int16'), aff)
nib.save(img, 'counts_nifti.nii.gz')

hdr = empty_header()
hdr['voxel_size'] = (1, 1, 1)
hdr['voxel_order'] = 'las'
hdr['vox_to_ras'] = aff
hdr['dim'] = counts_nifti.shape

# Treat these streamlines like they are in trackvis format and save them
streamlines_trackvis = ((ii, None, None) for ii in streamlines)
write('slAsTrackvis.trk', streamlines_trackvis, hdr)

# Move these streamlines from nifti to trackvis format and save them
# (the half-voxel shift is explained below)
streamlines_nifti = ((ii + .5, None, None) for ii in streamlines)
write('slAsNifti.trk', streamlines_nifti, hdr)
"""
Trackvis:
A------------
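Why the `+ .5`: the trackvis voxel-mm convention puts the corner of the first voxel at (0, 0, 0), while the nifti-style convention used by `track_counts` puts the center of the first voxel there. With unit voxels, adding half a voxel to nifti-space points expresses the same physical location in trackvis space. A quick sanity check (illustrative, not from the source):

import numpy as np
pt_nifti = np.zeros(3)          # center of the first voxel, nifti convention
pt_trackvis = pt_nifti + .5     # the same physical point, trackvis convention
print(pt_trackvis)              # [0.5 0.5 0.5]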