Example #1
def write_trk(fname, streamlines, affine=None, shape=None):
    """
    Write out a .trk file

    Parameters
    ----------
    fname : str
        Full path to save the file into
    streamlines : list
        A list of arrays of 3D coordinates
    affine : array (4,4), optional
        An affine transformation associated with the streamlines. Defaults to
        identity.
    shape : 3-element tuple, optional
        Spatial dimensions of an image associated with the streamlines.
        Defaults to not be set in the file header.
    """
    if affine is None:
        affine = np.eye(4)

    zooms = np.sqrt((affine * affine).sum(0))
    streamlines = dtu.move_streamlines(streamlines, affine)
    data = ((s, None, None) for s in streamlines)

    voxel_order = nib.orientations.aff2axcodes(affine)
    voxel_order = "".join(voxel_order)

    hdr = nib.trackvis.empty_header()
    hdr['voxel_size'] = zooms[:3]
    hdr['voxel_order'] = voxel_order
    hdr['vox_to_ras'] = affine
    if shape is not None:
        hdr['dim'] = shape
    trackvis.write(fname, data, hdr, points_space="rasmm")
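A minimal usage sketch for write_trk above, assuming the pre-4.0 nibabel.trackvis API and an old dipy that still exposes dipy.tracking.utils (imported as dtu); the file name and toy streamlines are illustrative only:

import numpy as np

# two short streamlines, each an (N, 3) array of point coordinates
toy_streamlines = [np.array([[0., 0., 0.], [1., 1., 1.], [2., 2., 2.]]),
                   np.array([[0., 1., 0.], [1., 2., 1.]])]
write_trk('toy.trk', toy_streamlines, affine=np.eye(4), shape=(91, 109, 91))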
Example #2
def write_trk(fname, streamlines, affine=None, shape=None):
    """
    Write out a .trk file

    Parameters
    ----------
    fname : str
        Full path to save the file into
    streamlines : list
        A list of arrays of 3D coordinates
    affine : array (4,4), optional
        An affine transformation associated with the streamlines. Defaults to
        identity.
    shape : 3-element tuple, optional
        Spatial dimensions of an image associated with the streamlines.
        Defaults to not be set in the file header.
    """
    if affine is None:
        affine = np.eye(4)

    zooms = np.sqrt((affine * affine).sum(0))
    streamlines = move_streamlines(streamlines, affine)
    data = ((s, None, None) for s in streamlines)

    voxel_order = nib.orientations.aff2axcodes(affine)
    voxel_order = "".join(voxel_order)

    hdr = nib.trackvis.empty_header()
    hdr['voxel_size'] = zooms[:3]
    hdr['voxel_order'] = voxel_order
    hdr['vox_to_ras'] = affine
    if shape is not None:
        hdr['dim'] = shape
    trackvis.write(fname, data, hdr, points_space="rasmm")
Example #3
    def _run_interface(self, runtime):
        dx, dy, dz = get_data_dims(self.inputs.image_file)
        vx, vy, vz = get_vox_dims(self.inputs.image_file)
        image_file = nb.load(self.inputs.image_file)
        affine = image_file.get_affine()
        out_filename = op.abspath(self.inputs.out_filename)

        # Reads MRTrix tracks
        header, streamlines = read_mrtrix_tracks(self.inputs.in_file, as_generator=True)
        iflogger.info("MRTrix Header:")
        iflogger.info(header)
        # Writes to Trackvis
        trk_header = nb.trackvis.empty_header()
        trk_header["dim"] = [dx, dy, dz]
        trk_header["voxel_size"] = [vx, vy, vz]
        trk_header["n_count"] = header["count"]

        if isdefined(self.inputs.matrix_file) and isdefined(self.inputs.registration_image_file):
            iflogger.info("Applying transformation from matrix file {m}".format(m=self.inputs.matrix_file))
            xfm = np.genfromtxt(self.inputs.matrix_file)
            iflogger.info(xfm)
            registration_image_file = nb.load(self.inputs.registration_image_file)
            reg_affine = registration_image_file.get_affine()
            r_dx, r_dy, r_dz = get_data_dims(self.inputs.registration_image_file)
            r_vx, r_vy, r_vz = get_vox_dims(self.inputs.registration_image_file)
            iflogger.info("Using affine from registration image file {r}".format(r=self.inputs.registration_image_file))
            iflogger.info(reg_affine)
            trk_header["vox_to_ras"] = reg_affine
            trk_header["dim"] = [r_dx, r_dy, r_dz]
            trk_header["voxel_size"] = [r_vx, r_vy, r_vz]

            affine = np.dot(affine, np.diag(1.0 / np.array([vx, vy, vz, 1])))
            transformed_streamlines = transform_to_affine(streamlines, trk_header, affine)

            aff = affine_from_fsl_mat_file(xfm, [vx, vy, vz], [r_vx, r_vy, r_vz])
            iflogger.info(aff)

            axcode = aff2axcodes(reg_affine)
            trk_header["voxel_order"] = axcode[0] + axcode[1] + axcode[2]

            final_streamlines = move_streamlines(transformed_streamlines, aff)
            trk_tracks = ((ii, None, None) for ii in final_streamlines)
            trk.write(out_filename, trk_tracks, trk_header)
            iflogger.info("Saving transformed Trackvis file as {out}".format(out=out_filename))
            iflogger.info("New TrackVis Header:")
            iflogger.info(trk_header)
        else:
            iflogger.info(
                "Applying transformation from scanner coordinates to {img}".format(img=self.inputs.image_file)
            )
            axcode = aff2axcodes(affine)
            trk_header["voxel_order"] = axcode[0] + axcode[1] + axcode[2]
            trk_header["vox_to_ras"] = affine
            transformed_streamlines = transform_to_affine(streamlines, trk_header, affine)
            trk_tracks = ((ii, None, None) for ii in transformed_streamlines)
            trk.write(out_filename, trk_tracks, trk_header)
            iflogger.info("Saving Trackvis file as {out}".format(out=out_filename))
            iflogger.info("TrackVis Header:")
            iflogger.info(trk_header)
        return runtime
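The transform_to_affine helper used above is not part of this snippet; the sketch below follows the spirit of nipype's MRTrix converter and assumes an old dipy that still provides move_streamlines:

import numpy as np
from dipy.tracking.utils import move_streamlines

def transform_to_affine(streams, header, affine):
    # split the affine into a rotation part and a scale/shear part
    rotation, scale = np.linalg.qr(affine)
    streams = move_streamlines(streams, rotation)
    # undo the voxel sizes recorded in the TrackVis header
    scale[0:3, 0:3] = np.dot(scale[0:3, 0:3],
                             np.diag(1. / header['voxel_size']))
    scale[0:3, 3] = abs(scale[0:3, 3])
    return move_streamlines(streams, scale)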
Example #4
    def _run_interface(self, runtime):
        dx, dy, dz = get_data_dims(self.inputs.image_file)
        vx, vy, vz = get_vox_dims(self.inputs.image_file)
        image_file = nb.load(self.inputs.image_file)
        affine = image_file.get_affine()
        out_filename = op.abspath(self.inputs.out_filename)

        # Reads MRTrix tracks
        header, streamlines = read_mrtrix_tracks(self.inputs.in_file, as_generator=True)
        iflogger.info('MRTrix Header:')
        iflogger.info(header)
        # Writes to Trackvis
        trk_header = nb.trackvis.empty_header()
        trk_header['dim'] = [dx, dy, dz]
        trk_header['voxel_size'] = [vx, vy, vz]
        trk_header['n_count'] = header['count']

        if isdefined(self.inputs.matrix_file) and isdefined(self.inputs.registration_image_file):
            iflogger.info('Applying transformation from matrix file {m}'.format(m=self.inputs.matrix_file))
            xfm = np.genfromtxt(self.inputs.matrix_file)
            iflogger.info(xfm)
            registration_image_file = nb.load(self.inputs.registration_image_file)
            reg_affine = registration_image_file.get_affine()
            r_dx, r_dy, r_dz = get_data_dims(self.inputs.registration_image_file)
            r_vx, r_vy, r_vz = get_vox_dims(self.inputs.registration_image_file)
            iflogger.info('Using affine from registration image file {r}'.format(r=self.inputs.registration_image_file))
            iflogger.info(reg_affine)
            trk_header['vox_to_ras'] = reg_affine
            trk_header['dim'] = [r_dx, r_dy, r_dz]
            trk_header['voxel_size'] = [r_vx, r_vy, r_vz]

            affine = np.dot(affine, np.diag(1. / np.array([vx, vy, vz, 1])))
            transformed_streamlines = transform_to_affine(streamlines, trk_header, affine)

            aff = affine_from_fsl_mat_file(xfm, [vx, vy, vz], [r_vx, r_vy, r_vz])
            iflogger.info(aff)

            axcode = aff2axcodes(reg_affine)
            trk_header['voxel_order'] = axcode[0] + axcode[1] + axcode[2]

            final_streamlines = move_streamlines(transformed_streamlines, aff)
            trk_tracks = ((ii, None, None) for ii in final_streamlines)
            trk.write(out_filename, trk_tracks, trk_header)
            iflogger.info('Saving transformed Trackvis file as {out}'.format(out=out_filename))
            iflogger.info('New TrackVis Header:')
            iflogger.info(trk_header)
        else:
            iflogger.info('Applying transformation from scanner coordinates to {img}'.format(img=self.inputs.image_file))
            axcode = aff2axcodes(affine)
            trk_header['voxel_order'] = axcode[0] + axcode[1] + axcode[2]
            trk_header['vox_to_ras'] = affine
            transformed_streamlines = transform_to_affine(streamlines, trk_header, affine)
            trk_tracks = ((ii, None, None) for ii in transformed_streamlines)
            trk.write(out_filename, trk_tracks, trk_header)
            iflogger.info('Saving Trackvis file as {out}'.format(out=out_filename))
            iflogger.info('TrackVis Header:')
            iflogger.info(trk_header)
        return runtime
Example #5
    def dump_qsdr2MNI_track_lookup(self, output, savetrk=False):
        """Converts from trackvis voxmm to ijk.
        Parameters:
        ===========
        output : str
          .pkl file where the voxel to streamline id mapping is saved
        savetrk : str or False, optional
          path where the trackvis file will be saved; False skips writing one.

        NOTE:
        =====
         REQUIRES that this trackdataset came from a trk file, as the header is
         essential to transforming tracks to ijk.

         From experience, tracks brought into alignment with the MNI152 template
         via DTK's track_transform appear as LAS -> LPS in trackvis's dataset info panel.
         I have been able to get qsdr2MNI to look correct in trackvis by converting
         it to LAS ordering. LAS also relates voxmm coordinates to MNI152 ijk by
         a scalar factor.

        """
        # index tracks by the voxels they pass through
        mni_voxel_size = np.array([2.] * 3)
        tracks_at_ijk = defaultdict(set)
        output_voxmm = []
        for trknum, trk in enumerate(self.tracks):
            data = set([trknum])
            # convert voxmm to LAS
            ijk = self.voxmm_to_ijk(trk, to_order="LAS")
            pretty_ijk = self.voxmm_to_ijk(trk, to_order="LAS", floor=False)
            # QSDR from DSI Studio has a different bounding box.
            ijk = ijk + np.array([6, 7, 11])
            pretty_ijk = pretty_ijk + np.array([6, 7, 11])

            output_voxmm.append(pretty_ijk * mni_voxel_size)
            # Floor at the end?
            unq = track_math.remove_duplicates(ijk).astype(np.int32)
            for _ijk in unq:
                tracks_at_ijk[tuple(_ijk)].update(data)
        self.tracks_at_ijk = tracks_at_ijk
        print("original", self.tracks.shape, "tracks")
        self.tracks = np.array(output_voxmm)
        print("replaced by", self.tracks.shape, "tracks")

        # Save the hash dump
        fop = open(output, "wb")
        pickle.dump(self, fop, pickle.HIGHEST_PROTOCOL)
        fop.close()

        # Write out a new trackvis file
        if savetrk:
            # Actually write a trk file so we can check against the
            #   MNI brain in trackvis
            trackvis.write(
                savetrk,
                ((stream * mni_hdr['voxel_size'], None, None)
                 for stream in output_voxmm),
                np.array(mni_hdr)
            )
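The module-level mni_hdr used by dump_qsdr2MNI_track_lookup is not shown in this snippet; a plausible sketch, assuming a TrackVis header for the 2 mm MNI152 grid in LAS order (the values here are an assumption, not the original definition):

import numpy as np
from nibabel import trackvis

mni_hdr = trackvis.empty_header()
mni_hdr['dim'] = np.array([91, 109, 91])        # 2 mm MNI152 grid
mni_hdr['voxel_size'] = np.array([2., 2., 2.])
mni_hdr['voxel_order'] = 'LAS'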
Example #6
    def dump_qsdr2MNI_track_lookup(self, output, savetrk=False):
        """Converts from trackvis voxmm to ijk.
        Parameters:
        ===========
        output : str
          .pkl file where the voxel to streamline id mapping is saved
        savetrk : str or False, optional
          path where the trackvis file will be saved; False skips writing one.

        NOTE:
        =====
         REQUIRES that this trackdataset came from a trk file, as the header is
         essential to transforming tracks to ijk.

         From experience, tracks brought into alignment with the MNI152 template
         via DTK's track_transform appear as LAS -> LPS in trackvis's dataset info panel.
         I have been able to get qsdr2MNI to look correct in trackvis by converting
         it to LAS ordering. LAS also relates voxmm coordinates to MNI152 ijk by
         a scalar factor.

        """
        # index tracks by the voxels they pass through
        mni_voxel_size = np.array([2.] * 3)
        tracks_at_ijk = defaultdict(set)
        output_voxmm = []
        for trknum, trk in enumerate(self.tracks):
            data = set([trknum])
            # convert voxmm to LAS
            ijk = self.voxmm_to_ijk(trk, to_order="LAS")
            pretty_ijk = self.voxmm_to_ijk(trk, to_order="LAS", floor=False)
            # QSDR from DSI Studio has a different bounding box.
            ijk = ijk + np.array([6, 7, 11])
            pretty_ijk = pretty_ijk + np.array([6, 7, 11])

            output_voxmm.append(pretty_ijk * mni_voxel_size)
            # Floor at the end?
            unq = track_math.remove_duplicates(ijk).astype(np.int32)
            for _ijk in unq:
                tracks_at_ijk[tuple(_ijk)].update(data)
        self.tracks_at_ijk = tracks_at_ijk
        print("original", self.tracks.shape, "tracks")
        self.tracks = np.array(output_voxmm)
        print("replaced by", self.tracks.shape, "tracks")

        # Save the hash dump
        fop = open(output, "wb")
        pickle.dump(self, fop, pickle.HIGHEST_PROTOCOL)
        fop.close()

        # Write out a new trackvis file
        if savetrk:
            # Actually write a trk file so we can check against the
            #   MNI brain in trackvis
            trackvis.write(savetrk,
                           ((stream * mni_hdr['voxel_size'], None, None)
                            for stream in output_voxmm), np.array(mni_hdr))
Example #7
    def _run_interface(self, runtime):

        nii_hdr = nb.load(self.inputs.reference_file).get_header()
        hdr = self._create_trackvis_header(nii_hdr)

        fobj = open(self.inputs.out_file, 'wb')
        tv.write(fobj, self._read_tracts(nii_hdr), hdr)
        fobj.close()

        return runtime
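The interface's _create_trackvis_header helper is not included in the snippet; a minimal sketch of what it plausibly does, seeding a TrackVis header from the reference NIfTI header (the method body is an assumption):

    def _create_trackvis_header(self, nii_hdr):
        import numpy as np
        from nibabel import trackvis as tv
        hdr = tv.empty_header()
        hdr['dim'] = np.array(nii_hdr.get_data_shape()[:3])
        hdr['voxel_size'] = np.array(nii_hdr.get_zooms()[:3])
        return hdr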
Example #8
 def save_streamlines(self, streamlines, save_streamlines_to):
     trk_hdr = empty_header()
     voxel_order = orientation_to_string(nib.io_orientation(self.affine))
     trk_hdr['voxel_order'] = voxel_order
     trk_hdr['voxel_size'] = self.voxel_size
     trk_hdr['vox_to_ras'] = self.affine
     trk_hdr['dim'] = self.shape
     trk_tracks = ((ii, None, None) for ii in streamlines)
     write(save_streamlines_to, trk_tracks, trk_hdr)
     pickle.dump(self, open(save_streamlines_to + '.p', 'wb'))
Example #9
 def _run_interface(self, runtime):

     nii_hdr = nb.load(self.inputs.reference_file).get_header()
     hdr = self._create_trackvis_header(nii_hdr)

     fobj = open(self.inputs.out_file, 'wb')
     tv.write(fobj, self._read_tracts(nii_hdr), hdr)
     fobj.close()

     return runtime
Example #10
 def save_streamlines(self, streamlines, save_streamlines_to):
     trk_hdr = empty_header()
     voxel_order = orientation_to_string(nib.io_orientation(self.affine))
     trk_hdr['voxel_order'] = voxel_order
     trk_hdr['voxel_size'] = self.voxel_size
     trk_hdr['vox_to_ras'] = self.affine
     trk_hdr['dim'] = self.shape
     trk_tracks = ((ii, None, None) for ii in streamlines)
     write(save_streamlines_to, trk_tracks, trk_hdr)
     pickle.dump(self, open(save_streamlines_to + '.p', 'wb'))
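A quick round-trip check for the file written above, assuming the old nibabel.trackvis reader; the file name is illustrative:

from nibabel import trackvis as tv

streams, hdr = tv.read('bundle.trk')
print(len(streams), hdr['voxel_order'], hdr['dim'])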
Example #11
def filter_fibers(intrk,
                  outtrk='',
                  fiber_cutoff_lower=20,
                  fiber_cutoff_upper=500):
    """Filters a tractogram based on lower / upper cutoffs.

    Parameters
    ----------
    intrk : TRK file
        Path to a tractogram file in TRK format

    outtrk : TRK file
        Output path for the filtered tractogram

    fiber_cutoff_lower : int
        Lower number of fibers cutoff (Default: 20)

    fiber_cutoff_upper : int
        Upper number of fibers cutoff (Default: 500)
    """
    print("Cut Fiber Filtering")
    print("===================")

    print("Input file for fiber cutting is: %s" % intrk)

    if outtrk == '':
        _, filename = os.path.split(intrk)
        base, ext = os.path.splitext(filename)
        outtrk = os.path.abspath(base + '_cutfiltered' + ext)

    # compute length array
    le = compute_length_array(intrk)

    # keep only fibers whose length falls between the cutoffs
    reducedidx = np.where((le > fiber_cutoff_lower)
                          & (le < fiber_cutoff_upper))[0]

    # load trackfile (downside, needs everything in memory)
    fibold, hdrold = tv.read(intrk)

    # rewrite the track vis file with the reduced number of fibers
    outstreams = []
    for i in reducedidx:
        outstreams.append(fibold[i])

    n_fib_out = len(outstreams)
    hdrnew = hdrold.copy()
    hdrnew['n_count'] = n_fib_out

    # print("Compute length array for cut fibers")
    # le = compute_length_array(streams=outstreams)
    print("Write out file: %s" % outtrk)
    print("Number of fibers out : %d" % hdrnew['n_count'])
    tv.write(outtrk, outstreams, hdrnew)
    print("File written: %s" % os.path.exists(outtrk))
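An illustrative call, assuming np, os, tv (nibabel's trackvis module) and compute_length_array are in scope as the snippet implies:

filter_fibers('streamline.trk', fiber_cutoff_lower=20, fiber_cutoff_upper=500)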
Example #12
    def save(self, fname, use_mni_header=False, use_qsdr_header=False):
        """Save the object as a .trk file"""
        if use_mni_header:
            header = mni_hdr
        elif use_qsdr_header:
            header = qsdr_hdr
        else:
            header = self.header

        trackvis.write(fname, ((stream, None, None) for stream in self),
                       np.array(header))
Example #13
 def write(self, filename, **kwargs):
     endianness = kwargs.get('endianness', None)
     hdr_mapping = kwargs.get('hdr_mapping', self.hdr)
     points_space = kwargs.get('points_space', 'voxel')
     streamlines = kwargs.get('streamlines', self.fibers)
     def streamlines_gen(streamlines):
         for streamline in streamlines:
             yield (streamline, None, None)
     trackvis.write(filename, streamlines_gen(streamlines),
                    hdr_mapping=hdr_mapping, endianness=endianness,
                    points_space=points_space)
Example #14
def save_trk(tract_name, test_tractogram, segmented_tract_LAP, hdr, prefix):
    """Save the segmented tract estimated from the LAP."""
    filedir = os.path.dirname('data/segmented_tract/')
    if not os.path.exists(filedir):
        os.makedirs(filedir)

    save_segmented_tract_LAP_filename = '%s/%s_%s_%s.trk' % \
        (filedir, test_tractogram, tract_name, prefix)

    strmR_A = ((sl, None, None) for sl in segmented_tract_LAP)
    trackvis.write(save_segmented_tract_LAP_filename, strmR_A, hdr)
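A hedged usage sketch for save_trk: select some streamlines from an existing tractogram and save them with its header (file names and the selection are illustrative):

from nibabel import trackvis

streams, hdr = trackvis.read('sub01_tractogram.trk')
segmented = [s[0] for s in streams][:100]   # stand-in for the LAP segmentation
save_trk('cst_left', 'sub01', segmented, hdr, 'lap')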
Example #15
 def save(self, fname, use_mni_header=False, use_qsdr_header=False):
     """Save the object as a .trk file"""
     if use_mni_header:
         header = mni_hdr
     elif use_qsdr_header:
         header = qsdr_hdr
     else:
         header = self.header

     trackvis.write(
         fname,
         ((stream, None, None) for stream in self),
         np.array(header)
     )
Example #16
def bundle_tracks(in_file, dist_thr=40., pts=16, skip=80.):
    import subprocess
    import os.path as op
    from nibabel import trackvis as tv
    from dipy.segment.quickbundles import QuickBundles
    streams, hdr = tv.read(in_file)
    streamlines = [i[0] for i in streams]
    qb = QuickBundles(streamlines, float(dist_thr), int(pts))
    clusters = qb.clustering
    #scalars = [i[0] for i in streams]

    out_files = []
    name = "quickbundle_"
    n_clusters = len(clusters)
    print("%d clusters found" % n_clusters)

    new_hdr = tv.empty_header()
    new_hdr['n_scalars'] = 1

    for cluster in clusters:
        cluster_trk = op.abspath(name + str(cluster) + ".trk")
        print("Writing cluster %d to %s" % (cluster, cluster_trk))
        out_files.append(cluster_trk)
        clust_idxs = clusters[cluster]['indices']
        new_streams = [streamlines[i] for i in clust_idxs]
        for_save = [(sl, None, None) for sl in new_streams]
        tv.write(cluster_trk, for_save, hdr)

    out_merged_file = "MergedBundles.trk"
    command_list = ["track_merge"]
    command_list.extend(out_files)
    command_list.append(out_merged_file)
    subprocess.call(command_list)
    out_scene_file = write_trackvis_scene(out_merged_file,
                                          n_clusters=len(clusters),
                                          skip=skip,
                                          names=None,
                                          out_file="NewScene.scene")
    print("Merged track file written to %s" % out_merged_file)
    print("Scene file written to %s" % out_scene_file)
    return out_files, out_merged_file, out_scene_file
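An illustrative call; note that bundle_tracks shells out to DTK's track_merge, which must be on the PATH, and that write_trackvis_scene must be importable:

out_files, merged_trk, scene = bundle_tracks('whole_brain.trk', dist_thr=40., pts=16)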
Example #17
 def _run_interface(self, runtime):
     
     tracts, hdr = tv.read(self.inputs.trackvis_file, as_generator=True)
     
     self.stat_files_data = []
     self.max_maps_data = []
     self.mean_maps_data = []
     for stat_file in self.inputs.stat_files:
         fmri_nii = nb.load(stat_file)
         self.stat_files_data.append(fmri_nii.get_data())
     
         self.max_maps_data.append(np.zeros(fmri_nii.get_header().get_data_shape()))
         self.mean_maps_data.append(np.zeros(fmri_nii.get_header().get_data_shape()))
     
     hdr = hdr.copy()
     if isdefined(self.inputs.stat_labels) and len(self.inputs.stat_labels) == len(self.inputs.stat_files):
         for i, label in enumerate(self.inputs.stat_labels):
             hdr['property_name'][i] = ('max_%s' % label)[0:19]
             #hdr['property_name'][1+i*2] = 'stat_mean_%s' % label
     else:
         for i in range(len(self.inputs.stat_files)):
             hdr['property_name'][i] = ('max%d' % i)[0:19]
             #hdr['property_name'][1+i*2] = 'stat_mean%d' % i
     
     tv.write(self.inputs.out_tracks, self._gen_annotate_tracts(tracts, hdr), hdr)
     
     if isdefined(self.inputs.stat_labels) and len(self.inputs.stat_labels) == len(self.inputs.stat_files):
         for i, label in enumerate(self.inputs.stat_labels):
             nb.save(nb.Nifti1Image(self.max_maps_data[i], fmri_nii.get_affine(), fmri_nii.get_header()), self.inputs.out_max_map_prefix + "_%s"%label + '.nii')
             nb.save(nb.Nifti1Image(self.mean_maps_data[i], fmri_nii.get_affine(), fmri_nii.get_header()), self.inputs.out_mean_map_prefix + "_%s"%label + '.nii')
     else:
         for i in range(len(self.inputs.stat_files)):
             nb.save(nb.Nifti1Image(self.max_maps_data[i], fmri_nii.get_affine(), fmri_nii.get_header()), self.inputs.out_max_map_prefix + str(i) + '.nii')
             nb.save(nb.Nifti1Image(self.mean_maps_data[i], fmri_nii.get_affine(), fmri_nii.get_header()), self.inputs.out_mean_map_prefix + str(i) + '.nii')
     
     del self.mean_maps_data
     del self.max_maps_data
     del self.stat_files_data
         
     return runtime
Example #18
def get_offspring(tractogram_fn, output_dir, subject, tract_name,
                  points_per_sl):
    # Load (using old TractSeg dataset <=V1.1.0 https://github.com/MIC-DKFZ/TractSeg/blob/17b33b37bafad7566de6372a534a14a1ef5a7384/resources/utility_scripts/trk_2_binary.py)
    streams, header = trackvis.read(tractogram_fn)

    # Re-sample each streamline to have the required number of points.
    # trackvis.read returns a (points, scalars, properties) tuple per
    # streamline, so first extract just the coordinate arrays.
    only_streamlines = [sl[0] for sl in streams]
    resampled_points = set_number_of_points(only_streamlines, points_per_sl)

    # Now store these resampled coordinates into the streamline tuples
    # Since you can't modify tuples, we need to re-write the tuples entirely
    for sl_num in range(len(streams)):
        streams[sl_num] = (resampled_points[sl_num], streams[sl_num][1],
                           streams[sl_num][2])

    # Note, new header not needed since # of streamlines is automatically updated when the file is saved
    trackvis.write(output_dir + "/" + subject + "_" + tract_name + ".trk",
                   streamlines=streams,
                   hdr_mapping=header)
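The resampling step in isolation, a minimal sketch using dipy's set_number_of_points:

import numpy as np
from dipy.tracking.streamline import set_number_of_points

sl = np.array([[0., 0., 0.], [1., 0., 0.], [2., 0., 0.]])
resampled = set_number_of_points([sl], 12)
print(resampled[0].shape)   # (12, 3)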
Example #19
def filter_fibers(applied_spline=False):
    
    log.info("Cut Fiber Filtering")
    log.info("===================")
    log.info("Was spline filtering applied? %s" % applied_spline)
    
    if applied_spline:
        intrk = op.join(gconf.get_cmp_fibers(), 'streamline_splinefiltered.trk')
    else:
        intrk = op.join(gconf.get_cmp_fibers(), 'streamline.trk')
        
    log.info("Input file for fiber cutting is: %s" % intrk)
    
    outtrk = op.join(gconf.get_cmp_fibers(), 'streamline_cutfiltered.trk')
    
    # compute length array
    le = compute_length_array(intrk, savefname='lengths_beforecutfiltered.npy')

    # keep only fibers whose length falls between the cutoffs
    reducedidx = np.where((le > gconf.fiber_cutoff_lower) & (le < gconf.fiber_cutoff_upper))[0]

    # load trackfile (downside, needs everything in memory)
    fibold, hdrold = tv.read(intrk)

    # rewrite the trackvis file with the reduced number of fibers
    outstreams = []
    for i in reducedidx:
        outstreams.append(fibold[i])

    n_fib_out = len(outstreams)
    hdrnew = hdrold.copy()
    hdrnew['n_count'] = n_fib_out

    log.info("Compute length array for cut fibers")
    le = compute_length_array(streams=outstreams)
    log.info("Write out file: %s" % outtrk)
    tv.write(outtrk, outstreams, hdrnew)
Example #20
def bundle_tracks(in_file, dist_thr=40., pts=16, skip=80.):
    import subprocess
    import os.path as op
    from nibabel import trackvis as tv
    from dipy.segment.quickbundles import QuickBundles
    streams, hdr = tv.read(in_file)
    streamlines = [i[0] for i in streams]
    qb = QuickBundles(streamlines, float(dist_thr), int(pts))
    clusters = qb.clustering
    #scalars = [i[0] for i in streams]

    out_files = []
    name = "quickbundle_"
    n_clusters = len(clusters)
    print("%d clusters found" % n_clusters)

    new_hdr = tv.empty_header()
    new_hdr['n_scalars'] = 1

    for cluster in clusters:
        cluster_trk = op.abspath(name + str(cluster) + ".trk")
        print("Writing cluster %d to %s" % (cluster, cluster_trk))
        out_files.append(cluster_trk)
        clust_idxs = clusters[cluster]['indices']
        new_streams = [streamlines[i] for i in clust_idxs]
        for_save = [(sl, None, None) for sl in new_streams]
        tv.write(cluster_trk, for_save, hdr)
    
    out_merged_file = "MergedBundles.trk"
    command_list = ["track_merge"]
    command_list.extend(out_files)
    command_list.append(out_merged_file)
    subprocess.call(command_list)
    out_scene_file = write_trackvis_scene(out_merged_file, n_clusters=len(clusters),
                                          skip=skip, names=None, out_file="NewScene.scene")
    print("Merged track file written to %s" % out_merged_file)
    print("Scene file written to %s" % out_scene_file)
    return out_files, out_merged_file, out_scene_file
Example #21
def filter_fibers(intrk, outtrk='', fiber_cutoff_lower=20, fiber_cutoff_upper=500):

    print("Cut Fiber Filtering")
    print("===================")

    print("Input file for fiber cutting is: %s" % intrk)
    
    if outtrk == '':
        path, filename = os.path.split(intrk)
        base, ext = os.path.splitext(filename)
        outtrk = os.path.abspath(base + '_cutfiltered' + ext)
    
    # compute length array
    le = compute_length_array(intrk)
    
    # keep only fibers whose length falls between the cutoffs
    reducedidx = np.where((le > fiber_cutoff_lower) & (le < fiber_cutoff_upper))[0]
    
    # load trackfile (downside, needs everything in memory)
    fibold, hdrold = tv.read(intrk)
    
    # rewrite the track vis file with the reduced number of fibers
    outstreams = []
    for i in reducedidx:
        outstreams.append(fibold[i])
    
    n_fib_out = len(outstreams)
    hdrnew = hdrold.copy()
    hdrnew['n_count'] = n_fib_out
    
    #print("Compute length array for cut fibers")
    #le = compute_length_array(streams=outstreams)
    print("Write out file: %s" % outtrk)
    print("Number of fibers out : %d" % hdrnew['n_count'])
    tv.write(outtrk, outstreams, hdrnew)
    print("File written: %s" % os.path.exists(outtrk))
Example #22
"""
Perform QuickBundles clustering with the distance threshold given as
sys.argv[3] after having downsampled the streamlines to the number of
points given as sys.argv[4].
"""

print("Computing bundles")
qb = QuickBundles(streamlines, dist_thr=int(sys.argv[3]), pts=int(sys.argv[4]))
print("Completed")
"""
qb has attributes like `centroids` (cluster representatives), `total_clusters`
(total number of clusters) and methods like `partitions` (complete description
of all clusters) and `label2tracksids` (provides the indices of the streamlines
which belong in a specific cluster).
"""
centroids = qb.centroids
print(len(centroids))
streamlines = [i[0] for i in streams]
for i, centroid in enumerate(centroids):
    print(i)
    inds = qb.label2tracksids(i)
    list1 = []
    for number in range(1, len(inds)):
        list1.append((streamlines[inds[number]], None, None))
    saveTo = sys.argv[2] + '/' + str(i) + '.trk'
    tv.write(saveTo, list(list1))
print("Process COMPLETED")

os.system(
    'python /home/manu/Desktop/TESI2PARTE/tractconverter-master/scripts/WalkingTractConverter.py -i '
    + sys.argv[2] + ' -trk2tck -o  ' + sys.argv[2] + ' -a ' + sys.argv[5])
Example #23
aff = np.eye(4)
aff[0, 0] = -1
img = nib.Nifti1Image(counts_trackvis.astype('int16'), aff)
nib.save(img, 'counts_trackvis.nii.gz')
img = nib.Nifti1Image(counts_nifti.astype('int16'), aff)
nib.save(img, 'counts_nifti.nii.gz')

hdr = empty_header()
hdr['voxel_size'] = (1, 1, 1)
hdr['voxel_order'] = 'las'
hdr['vox_to_ras'] = aff
hdr['dim'] = counts_nifti.shape

# Treat these streamlines like they are in trackvis format and save them
streamlines_trackvis = ((ii, None, None) for ii in streamlines)
write('slAsTrackvis.trk', streamlines_trackvis, hdr)

# Move these streamlines from nifti to trackvis format and save them
streamlines_nifti = ((ii + .5, None, None) for ii in streamlines)
write('slAsNifti.trk', streamlines_nifti, hdr)
"""
Trackvis:
A------------
| C |   |   |
----B--------
|   |   |   |
-------------
|   |   |   |
------------D

A = [0, 0]
Example #24
def run_LiFE(subject):
    print('Process subject ' + subject)

    if not os.path.isfile(
            os.path.join(path_saveing, subject,
                         'Lamyg2LpMFG_LIFE_started.txt')):
        print("LiFE files do not exist for this subject, start calculation.")

        if os.path.isfile(
                os.path.join(
                    path_saveing, subject,
                    'Lamyg2LpMFG_clustered.trk')) and os.path.isfile(
                        os.path.join(path_saveing, subject,
                                     '2M_SIFT.trk')):
            print("All necessary files there, continue ...")

            print("Show other processes that this subject is processed")
            done = np.array([1])
            np.savetxt(os.path.join(path_saveing, subject,
                                    'Lamyg2LpMFG_LIFE_started.txt'),
                       done,
                       delimiter=',')

            try:
                directory_output = os.path.join(path_saveing, subject)

                print("Start calculation for subject %s" % subject)
                f_streamlines = os.path.join(path_saveing, subject,
                                             'Lamyg2LpMFG_clustered.trk')
                f_in_nifti = os.path.join(path, subject,
                                          'T1w/Diffusion/data.nii.gz')

                streams, hdr = tv.read(f_streamlines, points_space='voxel')
                streamlines = [i[0] for i in streams]

                data, affine, gtab, header, shell_mask = load_hcp_data(
                    path, subject)
                dim = header['dim'][1:4]

                # Otherwise all weights are NaN
                data[data <= 0.0] = 1.0

                print("Calculating neighborhood with LiFE")
                fiber_model_neighborhood = life.FiberModel(gtab)
                fiber_fit_neighborhood = fiber_model_neighborhood.fit(
                    data, streamlines, affine=np.eye(4))
                indices_neighborhood = fiber_fit_neighborhood.vox_coords

                neighborhood = np.zeros(dim, dtype=bool)
                for i in range(indices_neighborhood.shape[0]):
                    neighborhood[indices_neighborhood[i][0],
                                 indices_neighborhood[i][1],
                                 indices_neighborhood[i][2]] = 1

                save_as_nifti(
                    path_saveing + subject + "/Lamyg2LpMFG_neighborhood",
                    neighborhood.astype(int), affine)

                print('Find fibers that pass through neighborhood')
                f_streamlines_whole_brain = path_saveing + subject + "/2M_SIFT.trk"
                streams_whole_brain, hdr_whole_brain = tv.read(
                    f_streamlines_whole_brain, points_space='voxel')
                streamlines_whole_brain = [i[0] for i in streams_whole_brain]
                neighborhood_streamlines = utils.target(
                    streamlines_whole_brain, neighborhood, affine=np.eye(4))

                neighborhood_streamlines = list(neighborhood_streamlines)

                strm = ((sl, None, None) for sl in neighborhood_streamlines)
                tv.write(path_saveing + subject +
                         "/Lamyg2LpMFG_2M_SIFT_without_path.trk",
                         strm,
                         hdr_mapping=hdr_whole_brain,
                         points_space='voxel')

                print("Combine streamlines")
                streamlines_together = neighborhood_streamlines + streamlines
                strm_together = ((sl, None, None)
                                 for sl in streamlines_together)
                tv.write(path_saveing + subject +
                         "/Lamyg2LpMFG_2M_SIFT_with_path.trk",
                         strm_together,
                         hdr_mapping=hdr_whole_brain,
                         points_space='voxel')

                print("Start LiFE optimization with new path")
                fiber_model_together = life.FiberModel(gtab)
                fiber_fit_together = fiber_model_together.fit(
                    data, streamlines_together, affine=np.eye(4))
                model_predict_together = fiber_fit_together.predict()
                indices_together = fiber_fit_together.vox_coords

                mask_with = np.zeros(dim, dtype=bool)
                whole_brain_together = np.zeros(header['dim'][1:5])
                for i in range(indices_together.shape[0]):
                    whole_brain_together[
                        indices_together[i][0], indices_together[i][1],
                        indices_together[i][2]] = model_predict_together[i]
                    mask_with[indices_together[i][0], indices_together[i][1],
                              indices_together[i][2]] = 1

                save_as_nifti(
                    path_saveing + subject +
                    "/Lamyg2LpMFG_LiFE_prediction_with_path",
                    whole_brain_together, affine)
                save_as_matlab_file(path_saveing + subject +
                                    "/Lamyg2LpMFG_LiFE_betas_with_path",
                                    beta=fiber_fit_together.beta)
                save_as_nifti(
                    path_saveing + subject + "/Lamyg2LpMFG_LiFE_mask_with_path",
                    mask_with.astype(int), affine)

                print("Calculate RMSE with")
                model_error_together = model_predict_together - fiber_fit_together.data
                model_rmse_together = np.sqrt(
                    np.mean(model_error_together[..., ~gtab.b0s_mask]**2, -1))

                whole_brain_rmse_together = np.zeros(dim)
                for i in range(indices_together.shape[0]):
                    whole_brain_rmse_together[
                        indices_together[i][0], indices_together[i][1],
                        indices_together[i][2]] = model_rmse_together[i]

                save_as_nifti(
                    path_saveing + subject +
                    "/Lamyg2LpMFG_LiFE_rmse_with_path",
                    whole_brain_rmse_together, affine)

                print("Start LiFE optimization without new path")
                fiber_fit = copy.deepcopy(fiber_fit_together)
                fiber_fit.beta[-len(streamlines):] = 0
                model_predict = fiber_fit.predict()
                indices = fiber_fit.vox_coords

                whole_brain = np.zeros(header['dim'][1:5])
                mask_without = np.zeros(dim, dtype=bool)
                for i in range(indices.shape[0]):
                    whole_brain[indices[i][0], indices[i][1],
                                indices[i][2]] = model_predict[i]
                    mask_without[indices[i][0], indices[i][1],
                                 indices[i][2]] = 1

                save_as_nifti(
                    path_saveing + subject +
                    "/Lamyg2LpMFG_LiFE_prediction_without_path", whole_brain,
                    affine)
                save_as_matlab_file(path_saveing + subject +
                                    "/Lamyg2LpMFG_LiFE_betas_without_path",
                                    beta=fiber_fit.beta)
                save_as_nifti(
                    path_saveing + subject +
                    "/Lamyg2LpMFG_LiFE_mask_without_path",
                    mask_without.astype(int), affine)

                print("Calculate RMSE without")
                model_error = model_predict - fiber_fit.data
                model_rmse = np.sqrt(
                    np.mean(model_error[..., ~gtab.b0s_mask]**2, -1))

                whole_brain_rmse = np.zeros(dim)
                for i in range(indices.shape[0]):
                    whole_brain_rmse[indices[i][0], indices[i][1],
                                     indices[i][2]] = model_rmse[i]

                save_as_nifti(
                    path_saveing + subject +
                    "/Lamyg2LpMFG_LiFE_rmse_without_path", whole_brain_rmse,
                    affine)

                print("All done")
            except Exception:
                print("An error occurred while computing LiFE. Skip this subject.")
        else:
            print("Some input files are missing, skip this subject.")
    else:
        print("LiFE files exist already for this subject, skip calculation.")

    return 0
Example #25
del streams  # ,hdr

if not os.path.isfile(C_fname):

    print('Starting LARCH ...')
    tim = time.perf_counter()
    C, atracks = tl.larch(tracks, [50.**2, 20.**2, 5.**2], True, True)
    #tracks=[tm.downsample(t,3) for t in tracks]
    #C=pf.local_skeleton_clustering(tracks,20.)
    print('Done in total of', time.perf_counter() - tim, 'seconds.')

    print('Saving result...')
    pkl.save_pickle(C_fname, C)

    streams = [(i, None, None) for i in atracks]
    tv.write(appr_fname, streams, hdr)

else:

    print('Loading result...')
    C = pkl.load_pickle(C_fname)

skel = []
for c in C:
    skel.append(C[c]['repz'])

print('Showing dataset after clustering...')
r = fos.ren()
fos.clear(r)
colors = np.zeros((len(skel), 3))
for (i, s) in enumerate(skel):
Example #26
def get_streamlines(seed, mid, ends, num_sl, points_per_sl):
    result = []
    for i in range(num_sl):
        # Generate start, middle, and end points
        seed_offset = np.random.normal(loc=0, scale=3, size=3)
        mid_offset = np.random.normal(loc=0, scale=3, size=3)
        end_offset = np.random.normal(loc=0, scale=3, size=3)
        sl = np.array([
            seed + seed_offset, mid + mid_offset,
            ends[randint(0,
                         len(ends) - 1)] + end_offset
        ])

        # Upscale to desired number of points
        streamlines = set_number_of_points([sl], points_per_sl)
        sl = streamlines[0]

        # Add noise to non-keypoints
        noise = np.random.normal(loc=0, scale=0.35, size=sl.shape)
        # keep the seed, mid, and end keypoints noise-free
        noise[0], noise[len(noise) // 2], noise[-1] = 0, 0, 0
        sl += noise

        # Add translation to all points
        curr_seed, curr_mid, curr_end = sl[0], sl[len(sl) // 2], sl[-1]
        t = np.random.uniform(low=-5, high=5, size=3)
        sl += t

        # Restore the keypoints to before translation
        sl[0], sl[len(sl) // 2], sl[-1] = curr_seed, curr_mid, curr_end

        result.append(sl)

    fn = "../../../data/PRE_SAMPLED/tractograms/599469_0_CST_left.trk"
    _, header = trackvis.read(fn)
    streams = [(sl, None, None) for sl in result]
    trackvis.write('result1.trk', streams, header)

    streamlines = result
    coords = np.reshape(streamlines, (-1, 3))
    x, y, z = coords[:, 0], coords[:, 1], coords[:, 2]

    fig = plt.figure()
    ax = Axes3D(fig)
    ax.scatter(list(x), list(y), list(z))
    ax.set_xlim([-50, 0])
    ax.set_ylim([-40, 0])
    ax.set_zlim([-60, 70])
    plt.show()

    #print(x)

    colors, i = [], 0
    for sl in streamlines:
        colors.extend([i for x in range(len(sl))])
        i += 1
    colors = np.array(colors)

    #fig = px.line_3d(x=x, y=y, z=z, color=colors, range_x=[-50,10], range_y=[-50,10], range_z=[-60,60])
    fig = px.line_3d(x=x, y=y, z=z, color=colors)
    #fig.add_trace(px.scatter_3d(x=x, y=y, z=z, color=np.array([1 for i in range(len(x))])).data[0])
    fig.show()
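An illustrative call; the keypoints below are made up, and the function assumes the hard-coded reference .trk exists and opens interactive plots:

get_streamlines(seed=np.array([-30., -20., 20.]),
                mid=np.array([-28., -18., 0.]),
                ends=[np.array([-25., -15., -40.])],
                num_sl=50, points_per_sl=40)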
    """
Example #27
def tractography_to_trackvis_file(filename, tractography, affine=None, image_dimensions=None):
    trk_header = trackvis.empty_header()

    if affine is not None:
        pass
    elif hasattr(tractography, 'affine'):
        affine = tractography.affine
    else:
        raise ValueError("Affine transform has to be provided")

    trackvis.aff_to_hdr(affine, trk_header, True, True)
    trk_header['origin'] = 0.
    if image_dimensions is not None:
        trk_header['dim'] = image_dimensions
    elif hasattr(tractography, 'image_dimensions'):
        trk_header['dim'] = tractography.image_dimensions
    else:
        raise ValueError("Image dimensions needed to save a trackvis file")

    orig_data = tractography.tracts_data()
    data = {}
    for k, v in orig_data.items():
        if not isinstance(v[0], numpy.ndarray):
            continue
        if (v[0].ndim > 1 and any(d > 1 for d in v[0].shape[1:])):
            warn(
                "Scalar data %s ignored as trackvis "
                "format does not handle multivalued data" % k
            )
        else:
            data[k] = v

    #data_new = {}
    # for k, v in data.iteritems():
    #    if (v[0].ndim > 1 and v[0].shape[1] > 1):
    #        for i in xrange(v[0].shape[1]):
    #            data_new['%s_%02d' % (k, i)] = [
    #                v_[:, i] for v_ in v
    #            ]
    #    else:
    #       data_new[k] = v
    trk_header['n_count'] = len(tractography.tracts())
    trk_header['n_properties'] = 0
    trk_header['n_scalars'] = len(data)

    if len(data) > 10:
        raise ValueError('At most 10 scalars permitted per point')

    trk_header['scalar_name'][:len(data)] = numpy.array(
        [n[:20] for n in data],
        dtype='|S20'
    )
    trk_tracts = []

    for i, sl in enumerate(tractography.tracts()):
        scalars = None
        if len(data) > 0:
            scalars = numpy.vstack([
                data[k.decode('utf8')][i].squeeze()
                for k in trk_header['scalar_name'][:len(data)]
            ]).T

        trk_tracts.append((sl, scalars, None))

    trackvis.write(filename, trk_tracts, trk_header, points_space='rasmm')
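A hedged usage sketch, assuming a tractography object that exposes .affine, .image_dimensions, .tracts() and .tracts_data() as the function requires; the output path is illustrative:

tractography_to_trackvis_file('out.trk', tractography)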
Example #28
streamlines = pickle.load(pkl_file)
print(type(streamlines))

hdr = tv.empty_header()
nifti_filename = 'dtifit__FA.nii'
nii_hdr = nb.load(nifti_filename).get_header()
hdr['dim'] = np.array(nii_hdr.get_data_shape())
hdr['voxel_size'] = np.array(nii_hdr.get_zooms())
aff = np.eye(4)
aff[0:3, 0:3] *= np.array(nii_hdr.get_zooms())
hdr['vox_to_ras'] = aff

print(hdr['version'])

for i in range(len(streamlines)):

    points_arr = streamlines[i][0]

    # invert y
    points_arr[:, 1] = nii_hdr.get_data_shape()[1] - points_arr[:, 1]

    # move to mm dimensions with the origin at (0, 0, 0)
    points_arr = points_arr * nii_hdr.get_zooms()

    streamlines[i] = (points_arr, None, None)

f = 'test.trk'
fobj = open(f, 'wb')
tv.write(fobj, streamlines, hdr)
fobj.close()
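Note that nibabel.trackvis.write also accepts a plain file name, so the manual open/close above can be avoided:

tv.write('test.trk', streamlines, hdr)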
Example #29
    def _run_interface(self, runtime):
        from dipy.tracking.utils import move_streamlines, affine_from_fsl_mat_file

        dx, dy, dz = get_data_dims(self.inputs.image_file)
        vx, vy, vz = get_vox_dims(self.inputs.image_file)
        image_file = nb.load(self.inputs.image_file)
        affine = image_file.affine
        out_filename = op.abspath(self.inputs.out_filename)

        # Reads MRTrix tracks
        header, streamlines = read_mrtrix_tracks(self.inputs.in_file,
                                                 as_generator=True)
        iflogger.info("MRTrix Header:")
        iflogger.info(header)
        # Writes to Trackvis
        trk_header = nb.trackvis.empty_header()
        trk_header["dim"] = [dx, dy, dz]
        trk_header["voxel_size"] = [vx, vy, vz]
        trk_header["n_count"] = header["count"]

        if isdefined(self.inputs.matrix_file) and isdefined(
                self.inputs.registration_image_file):
            iflogger.info("Applying transformation from matrix file %s",
                          self.inputs.matrix_file)
            xfm = np.genfromtxt(self.inputs.matrix_file)
            iflogger.info(xfm)
            registration_image_file = nb.load(
                self.inputs.registration_image_file)
            reg_affine = registration_image_file.affine
            r_dx, r_dy, r_dz = get_data_dims(
                self.inputs.registration_image_file)
            r_vx, r_vy, r_vz = get_vox_dims(
                self.inputs.registration_image_file)
            iflogger.info(
                "Using affine from registration image file %s",
                self.inputs.registration_image_file,
            )
            iflogger.info(reg_affine)
            trk_header["vox_to_ras"] = reg_affine
            trk_header["dim"] = [r_dx, r_dy, r_dz]
            trk_header["voxel_size"] = [r_vx, r_vy, r_vz]

            affine = np.dot(affine, np.diag(1.0 / np.array([vx, vy, vz, 1])))
            transformed_streamlines = transform_to_affine(
                streamlines, trk_header, affine)

            aff = affine_from_fsl_mat_file(xfm, [vx, vy, vz],
                                           [r_vx, r_vy, r_vz])
            iflogger.info(aff)

            axcode = aff2axcodes(reg_affine)
            trk_header["voxel_order"] = axcode[0] + axcode[1] + axcode[2]

            final_streamlines = move_streamlines(transformed_streamlines, aff)
            trk_tracks = ((ii, None, None) for ii in final_streamlines)
            trk.write(out_filename, trk_tracks, trk_header)
            iflogger.info("Saving transformed Trackvis file as %s",
                          out_filename)
            iflogger.info("New TrackVis Header:")
            iflogger.info(trk_header)
        else:
            iflogger.info(
                "Applying transformation from scanner coordinates to %s",
                self.inputs.image_file,
            )
            axcode = aff2axcodes(affine)
            trk_header["voxel_order"] = axcode[0] + axcode[1] + axcode[2]
            trk_header["vox_to_ras"] = affine
            transformed_streamlines = transform_to_affine(
                streamlines, trk_header, affine)
            trk_tracks = ((ii, None, None) for ii in transformed_streamlines)
            trk.write(out_filename, trk_tracks, trk_header)
            iflogger.info("Saving Trackvis file as %s", out_filename)
            iflogger.info("TrackVis Header:")
            iflogger.info(trk_header)
        return runtime
Example #30
            beta_with = load_matlab_file(
                path_saveing + subject +
                "/RVLPFC2FIRSTamyg_bigRight_LiFE_betas_with_path")['beta']

            streams, hdr = tv.read(path_saveing + subject +
                                   "/RVLPFC2FIRSTamyg_bigRight_clustered.trk"
                                   )  #, points_space='voxel'
            streamlines = [i[0] for i in streams]

            weights_with = np.squeeze(beta_with[:, -len(streamlines):])
            optimized_with = streamlines[-len(streamlines):]

            try:
                optimized_sl = list(
                    np.array(optimized_with)[np.where(weights_with > 0)])

                optimized_save = ((sl, None, None) for sl in optimized_sl)
                tv.write(os.path.join(
                    path_saveing, subject,
                    'RVLPFC2FIRSTamyg_bigRight_optimized.trk'),
                         optimized_save,
                         hdr_mapping=hdr)
            except Exception:
                print("Could not save streamlines, 0 streamlines detected")
        else:
            print("    Not all necessary files exist, skip subject")
    else:
        print("Strength of Evidence exists already, skip subject")

Example #31
streamlines = [i[0] for i in streams]
"""
Perform QuickBundles clustering with the distance threshold given as
sys.argv[3] after having downsampled the streamlines to the number of
points given as sys.argv[4].
"""

print("Computing bundles")
qb = QuickBundles(streamlines, dist_thr=int(sys.argv[3]), pts=int(sys.argv[4]))
print("Completed")

"""
qb has attributes like `centroids` (cluster representatives), `total_clusters`
(total number of clusters) and methods like `partitions` (complete description
of all clusters) and `label2tracksids` (provides the indices of the streamlines
which belong in a specific cluster).
"""
centroids = qb.centroids
print(len(centroids))
streamlines = [i[0] for i in streams]
for i, centroid in enumerate(centroids):
  print(i)
  inds = qb.label2tracksids(i)
  list1 = []
  for number in range(1, len(inds)):
    list1.append((streamlines[inds[number]], None, None))
  saveTo = sys.argv[2] + '/' + str(i) + '.trk'
  tv.write(saveTo, list(list1))
print("Process COMPLETED")

os.system('python /home/manu/Desktop/TESI2PARTE/tractconverter-master/scripts/WalkingTractConverter.py -i '+sys.argv[2]+' -trk2tck -o  '+sys.argv[2]+' -a '+sys.argv[5])
Example #32
            print("    Calculate optimized Fibers")
            beta_with = load_matlab_file(
                path_saveing + subject +
                "/RLamyg2LpMFG_LiFE_betas_with_path")['beta']

            streams, hdr = tv.read(
                path_saveing + subject +
                "/Lamyg2LpMFG_clustered.trk")  #, points_space='voxel'
            streamlines = [i[0] for i in streams]

            weights_with = np.squeeze(beta_with[:, -len(streamlines):])
            optimized_with = streamlines[-len(streamlines):]

            try:
                optimized_sl = list(
                    np.array(optimized_with)[np.where(weights_with > 0)])

                optimized_save = ((sl, None, None) for sl in optimized_sl)
                tv.write(os.path.join(path_saveing, subject,
                                      'Lamyg2LpMFG_optimized.trk'),
                         optimized_save,
                         hdr_mapping=hdr)
            except Exception:
                print("Could not save streamlines, 0 streamlines detected")
        else:
            print("    Not all necessary files exist, skip subject")
    else:
        print("Strength of Evidence exists already, skip subject")

Example #33
def tractography_to_trackvis_file(filename,
                                  tractography,
                                  affine=None,
                                  image_dimensions=None):
    trk_header = trackvis.empty_header()

    if affine is not None:
        pass
    elif hasattr(tractography, 'affine'):
        affine = tractography.affine
    else:
        raise ValueError("Affine transform has to be provided")

    trackvis.aff_to_hdr(affine, trk_header, True, True)
    trk_header['origin'] = 0.
    if image_dimensions is not None:
        trk_header['dim'] = image_dimensions
    elif hasattr(tractography, 'image_dimensions'):
        trk_header['dim'] = tractography.image_dimensions
    else:
        raise ValueError("Image dimensions needed to save a trackvis file")

    orig_data = tractography.tracts_data()
    data = {}
    for k, v in orig_data.items():
        if not isinstance(v[0], numpy.ndarray):
            continue
        if (v[0].ndim > 1 and any(d > 1 for d in v[0].shape[1:])):
            warn("Scalar data %s ignored as trackvis "
                 "format does not handle multivalued data" % k)
        else:
            data[k] = v

    #data_new = {}
    # for k, v in data.iteritems():
    #    if (v[0].ndim > 1 and v[0].shape[1] > 1):
    #        for i in xrange(v[0].shape[1]):
    #            data_new['%s_%02d' % (k, i)] = [
    #                v_[:, i] for v_ in v
    #            ]
    #    else:
    #       data_new[k] = v
    trk_header['n_count'] = len(tractography.tracts())
    trk_header['n_properties'] = 0
    trk_header['n_scalars'] = len(data)

    if len(data) > 10:
        raise ValueError('At most 10 scalars permitted per point')

    trk_header['scalar_name'][:len(data)] = numpy.array([n[:20] for n in data],
                                                        dtype='|S20')
    trk_tracts = []

    for i, sl in enumerate(tractography.tracts()):
        scalars = None
        if len(data) > 0:
            scalars = numpy.vstack([
                data[k.decode('utf8')][i].squeeze()
                for k in trk_header['scalar_name'][:len(data)]
            ]).T

        trk_tracts.append((sl, scalars, None))

    trackvis.write(filename, trk_tracts, trk_header, points_space='rasmm')
Example #34
            print("    Strength of Evidence                    : %.2f" % strength)

            np.savetxt(os.path.join(path_saveing, subject, 'RVLPFC2FIRSTamyg_bigRight_Strength_of_Evidence2.txt'), np.array([strength]), delimiter=',')

            print("    Calculate optimized Fibers")
            beta_with = load_matlab_file(path_saveing + subject + "/RVLPFC2FIRSTamyg_bigRight_LiFE_betas_with_path")['beta']

            streams, hdr = tv.read(path_saveing + subject + "/RVLPFC2FIRSTamyg_bigRight_clustered.trk")#, points_space='voxel'
            streamlines = [i[0] for i in streams]

            weights_with = np.squeeze(beta_with[:, -len(streamlines):])
            optimized_with = streamlines[-len(streamlines):]
            
            try:
                optimized_sl = list(np.array(optimized_with)[np.where(weights_with > 0)])

                optimized_save = ((sl, None, None) for sl in optimized_sl)
                tv.write(os.path.join(path_saveing, subject, 'RVLPFC2FIRSTamyg_bigRight_optimized.trk'), optimized_save, hdr_mapping=hdr)
            except Exception:
                print("Could not save streamlines, 0 streamlines detected")
        else:
            print("    Not all necessary files exist, skip subject")
    else:
        print("Strength of Evidence exists already, skip subject")





def run_LiFE(subject):
    print('Process subject ' + subject)

    if not os.path.isfile(os.path.join(path_saveing, subject, 'RVLPFC2FIRSTamyg_bigRight_LIFE_started2.txt')):
        print("LiFE files do not exist for this subject, start calculation.")

        if os.path.isfile(os.path.join(path_saveing, subject, 'RVLPFC2FIRSTamyg_bigRight_clustered.trk')) and os.path.isfile(os.path.join(path_saveing, subject, '2M_SIFT.trk')):
            print("All necessary files there, continue ...")

            print("Show other processes that this subject is processed")
            done = np.array([1])
            np.savetxt(os.path.join(path_saveing, subject, 'RVLPFC2FIRSTamyg_bigRight_LIFE_started2.txt'), done, delimiter=',')

            try:
                directory_output = os.path.join(path_saveing, subject)

                print("Start calculation for subject %s" % subject)
                f_streamlines = os.path.join(path_saveing, subject, 'RVLPFC2FIRSTamyg_bigRight_clustered.trk')
                f_in_nifti = os.path.join(path, subject, 'T1w/Diffusion/data.nii.gz')

                streams, hdr = tv.read(f_streamlines, points_space='voxel')
                streamlines = [i[0] for i in streams]

                data, affine, gtab, header, shell_mask = load_hcp_data(path, subject)
                dim = header['dim'][1:4]
                
                # Otherwise all weights are NaN
                data[data <= 0.0] = 1.0

                print("Calculating neighborhood with LiFE")
                fiber_model_neighborhood = life.FiberModel(gtab)
                fiber_fit_neighborhood = fiber_model_neighborhood.fit(data, streamlines, affine=np.eye(4))
                indices_neighborhood = fiber_fit_neighborhood.vox_coords

                neighborhood = np.zeros(dim, dtype=bool)
                for i in range(indices_neighborhood.shape[0]):
                    neighborhood[indices_neighborhood[i][0], indices_neighborhood[i][1], indices_neighborhood[i][2]] = 1

                save_as_nifti(path_saveing + subject + "/RVLPFC2FIRSTamyg_bigRight_neighborhood", neighborhood.astype(np.int), affine)
                
                print 'Find fibers that pass through the neighborhood'
                f_streamlines_whole_brain = path_saveing + subject + "/2M_SIFT.trk"
                streams_whole_brain, hdr_whole_brain = tv.read(f_streamlines_whole_brain, points_space='voxel')
                streamlines_whole_brain = [i[0] for i in streams_whole_brain]
                neighborhood_streamlines = utils.target(streamlines_whole_brain, neighborhood, affine=np.eye(4))
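                # (added note) utils.target keeps only the whole-brain
                # streamlines that pass through the neighborhood mask.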

                neighborhood_streamlines = list(neighborhood_streamlines)

                strm = ((sl, None, None) for sl in neighborhood_streamlines)
                tv.write(path_saveing + subject + "/RVLPFC2FIRSTamyg_bigRight_2M_SIFT_without_path.trk", strm,  hdr_mapping=hdr_whole_brain, points_space='voxel')

                print "Combine streamlines"
                streamlines_together = neighborhood_streamlines + streamlines
                strm_together = ((sl, None, None) for sl in streamlines_together)
                tv.write(path_saveing + subject + "/RVLPFC2FIRSTamyg_bigRight_2M_SIFT_with_path.trk", strm_together, hdr_mapping=hdr_whole_brain, points_space='voxel')

                print "Start LiFE optimization with new path"
                fiber_model_together = life.FiberModel(gtab)
                fiber_fit_together = fiber_model_together.fit(data, streamlines_together, affine=np.eye(4))
                model_predict_together = fiber_fit_together.predict()
                indices_together = fiber_fit_together.vox_coords

                mask_with = np.zeros(dim, dtype=bool)
                whole_brain_together = np.zeros(header['dim'][1:5])
                for i in range(indices_together.shape[0]):
                    whole_brain_together[indices_together[i][0], indices_together[i][1], indices_together[i][2]] = model_predict_together[i]
                    mask_with[indices_together[i][0], indices_together[i][1], indices_together[i][2]] = 1

                save_as_nifti(path_saveing + subject + "/RVLPFC2FIRSTamyg_bigRight_LiFE_prediction_with_path", whole_brain_together, affine)
                save_as_matlab_file(path_saveing + subject + "/RVLPFC2FIRSTamyg_bigRight_LiFE_betas_with_path", beta = fiber_fit_together.beta)
                save_as_nifti(path_saveing + subject + "/RVLPFC2FIRSTamyg_bigRight_LiFE_mask_with_path", mask_with.astype(np.int), affine)

                print "Calculate RMSE with"
                model_error_together = model_predict_together - fiber_fit_together.data
                model_rmse_together = np.sqrt(np.mean(model_error_together[..., ~gtab.b0s_mask] ** 2, -1))

                whole_brain_rmse_together = np.zeros(dim)
                for i in range(indices_together.shape[0]):
                    whole_brain_rmse_together[indices_together[i][0], indices_together[i][1], indices_together[i][2]] = model_rmse_together[i]

                save_as_nifti(path_saveing + subject + "/RVLPFC2FIRSTamyg_bigRight_LiFE_rmse_with_path", whole_brain_rmse_together, affine)
                
                print "Start LiFE optimization without new path"
                fiber_fit = copy.deepcopy(fiber_fit_together)
                fiber_fit.beta[-len(streamlines):] = 0
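                # (added note) Zeroing these betas "lesions" the candidate path
                # out of the already-fitted model, so the without-path
                # prediction needs no second optimization.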
                model_predict = fiber_fit.predict()
                indices = fiber_fit.vox_coords

                whole_brain = np.zeros(header['dim'][1:5])
                mask_without = np.zeros(dim, dtype=bool)
                for i in range(indices.shape[0]):
                    whole_brain[indices[i][0], indices[i][1], indices[i][2]] = model_predict[i]
                    mask_without[indices[i][0], indices[i][1], indices[i][2]] = 1

                save_as_nifti(path_saveing + subject + "/RVLPFC2FIRSTamyg_bigRight_LiFE_prediction_without_path", whole_brain, affine)
                save_as_matlab_file(path_saveing + subject + "/RVLPFC2FIRSTamyg_bigRight_LiFE_betas_without_path", beta=fiber_fit.beta)
                save_as_nifti(path_saveing + subject + "/RVLPFC2FIRSTamyg_bigRight_LiFE_mask_without_path", mask_without.astype(np.int), affine)

                print "Calculate RMSE without"
                model_error = model_predict - fiber_fit.data
                model_rmse = np.sqrt(np.mean(model_error[..., ~gtab.b0s_mask] ** 2, -1))

                whole_brain_rmse = np.zeros(dim)
                for i in range(indices.shape[0]):
                    whole_brain_rmse[indices[i][0], indices[i][1], indices[i][2]] = model_rmse[i]

                save_as_nifti(path_saveing + subject + "/RVLPFC2FIRSTamyg_bigRight_LiFE_rmse_without_path", whole_brain_rmse, affine)
                
                print "All done"
            except Exception:
                print "An error occurred while computing LiFE. Skip this subject."
        else:
            print "Some input files are missing, skip this subject."
    else:
        print "LiFE Files exist already for this subject, skip calculation."
    
    return 0
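
# A minimal driver sketch (added, not from the original source): run_LiFE
# returns 0 so it can be mapped over a list of subjects. The subject IDs and
# process count below are placeholders.
if __name__ == '__main__':
    from multiprocessing import Pool
    subjects = ['100307', '100408']  # hypothetical HCP-style subject IDs
    Pool(processes=2).map(run_LiFE, subjects)
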
# In[ ]:
                        streamlines = [i[0] for i in streams]

                        feature = ResampleFeature(nb_points=50)
                        metric = AveragePointwiseEuclideanMetric(feature=feature)
                        qb = QuickBundles(threshold=10., metric=metric)
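                        # (added note) ResampleFeature gives every streamline
                        # 50 points, so the pointwise-Euclidean metric and the
                        # threshold of 10 (mm in world coordinates) are
                        # comparable across streamlines of different lengths.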
                        clusters = qb.cluster(streamlines)

                        major_cluster = clusters > 60
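                        # (added note) Comparing a dipy ClusterMap with an int
                        # yields a boolean array flagging clusters with more
                        # than 60 streamlines.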
                        major_path = []
                        for j in range(len(clusters)):
                            if major_cluster[j]:
                                major_path.append([streamlines[i] for i in clusters[j].indices])
                        major_streams = list(itertools.chain(*major_path))

                        strm = ((sl, None, None) for sl in major_streams)
                        tv.write(f_out_clustered, strm, hdr_mapping=hdr)
                        
                        print '    All done'
                        
                    except Exception:
                        print '    Could not cluster streams'
                else:
                    print "    Could not load raw diffusion data, skip conversion and clustering."
        else:
            print "    Some input files are missing, skip this subject."
    else:
        print "    Clustered File exists already for this subject, skip calculation."


# In[ ]:
    def _run_interface(self, runtime):

        tracts, hdr = tv.read(self.inputs.trackvis_file, as_generator=True)

        self.stat_files_data = []
        self.max_maps_data = []
        self.mean_maps_data = []
        for stat_file in self.inputs.stat_files:
            fmri_nii = nb.load(stat_file)
            self.stat_files_data.append(fmri_nii.get_data())

            self.max_maps_data.append(
                np.zeros(fmri_nii.get_header().get_data_shape()))
            self.mean_maps_data.append(
                np.zeros(fmri_nii.get_header().get_data_shape()))

        hdr = hdr.copy()
        if isdefined(self.inputs.stat_labels) and len(
                self.inputs.stat_labels) == len(self.inputs.stat_files):
            for i, label in enumerate(self.inputs.stat_labels):
                hdr['property_name'][i] = ('max_%s' % label)[0:19]
                #hdr['property_name'][1+i*2] = 'stat_mean_%s'%label
        else:
            for i in range(len(self.inputs.stat_files)):
                hdr['property_name'][i] = ('max%d' % i)[0:19]
                #hdr['property_name'][1+i*2] = 'stat_mean%d'%i

        tv.write(self.inputs.out_tracks,
                 self._gen_annotate_tracts(tracts, hdr), hdr)
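        # (added note) _gen_annotate_tracts is defined elsewhere in this
        # interface; it is expected to yield annotated tracts while filling
        # self.max_maps_data and self.mean_maps_data used below. Note that the
        # saves below reuse fmri_nii from the last loop iteration, i.e. all
        # stat files are assumed to share the same affine and header.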

        if isdefined(self.inputs.stat_labels) and len(
                self.inputs.stat_labels) == len(self.inputs.stat_files):
            for i, label in enumerate(self.inputs.stat_labels):
                nb.save(
                    nb.Nifti1Image(self.max_maps_data[i],
                                   fmri_nii.get_affine(),
                                   fmri_nii.get_header()),
                    self.inputs.out_max_map_prefix + "_%s" % label + '.nii')
                nb.save(
                    nb.Nifti1Image(self.mean_maps_data[i],
                                   fmri_nii.get_affine(),
                                   fmri_nii.get_header()),
                    self.inputs.out_mean_map_prefix + "_%s" % label + '.nii')
        else:
            for i in range(len(self.inputs.stat_files)):
                nb.save(
                    nb.Nifti1Image(self.max_maps_data[i],
                                   fmri_nii.get_affine(),
                                   fmri_nii.get_header()),
                    self.inputs.out_max_map_prefix + str(i) + '.nii')
                nb.save(
                    nb.Nifti1Image(self.mean_maps_data[i],
                                   fmri_nii.get_affine(),
                                   fmri_nii.get_header()),
                    self.inputs.out_mean_map_prefix + str(i) + '.nii')

        del self.mean_maps_data
        del self.max_maps_data
        del self.stat_files_data

        return runtime
Example #39
0
del streams  #,hdr

if not os.path.isfile(C_fname):

    print 'Starting LARCH ...'
    tim = time.clock()
    C, atracks = tl.larch(tracks, [50.**2, 20.**2, 5.**2], True, True)
    #tracks=[tm.downsample(t,3) for t in tracks]
    #C=pf.local_skeleton_clustering(tracks,20.)
    print 'Done in total of ', time.clock() - tim, 'seconds.'

    print 'Saving result...'
    pkl.save_pickle(C_fname, C)

    streams = [(i, None, None) for i in atracks]
    tv.write(appr_fname, streams, hdr)

else:

    print 'Loading result...'
    C = pkl.load_pickle(C_fname)

skel = []
for c in C:
    skel.append(C[c]['repz'])

print 'Showing dataset after clustering...'
r = fos.ren()
fos.clear(r)
colors = np.zeros((len(skel), 3))
for (i, s) in enumerate(skel):
Example #40
0
    def _run_interface(self, runtime):
        from dipy.tracking.utils import move_streamlines, \
            affine_from_fsl_mat_file
        dx, dy, dz = get_data_dims(self.inputs.image_file)
        vx, vy, vz = get_vox_dims(self.inputs.image_file)
        image_file = nb.load(self.inputs.image_file)
        affine = image_file.affine
        out_filename = op.abspath(self.inputs.out_filename)

        # Reads MRTrix tracks
        header, streamlines = read_mrtrix_tracks(
            self.inputs.in_file, as_generator=True)
        iflogger.info('MRTrix Header:')
        iflogger.info(header)
        # Writes to Trackvis
        trk_header = nb.trackvis.empty_header()
        trk_header['dim'] = [dx, dy, dz]
        trk_header['voxel_size'] = [vx, vy, vz]
        trk_header['n_count'] = header['count']

        if isdefined(self.inputs.matrix_file) and isdefined(
                self.inputs.registration_image_file):
            iflogger.info('Applying transformation from matrix file %s',
                          self.inputs.matrix_file)
            xfm = np.genfromtxt(self.inputs.matrix_file)
            iflogger.info(xfm)
            registration_image_file = nb.load(
                self.inputs.registration_image_file)
            reg_affine = registration_image_file.affine
            r_dx, r_dy, r_dz = get_data_dims(
                self.inputs.registration_image_file)
            r_vx, r_vy, r_vz = get_vox_dims(
                self.inputs.registration_image_file)
            iflogger.info('Using affine from registration image file %s',
                          self.inputs.registration_image_file)
            iflogger.info(reg_affine)
            trk_header['vox_to_ras'] = reg_affine
            trk_header['dim'] = [r_dx, r_dy, r_dz]
            trk_header['voxel_size'] = [r_vx, r_vy, r_vz]

            affine = np.dot(affine, np.diag(1. / np.array([vx, vy, vz, 1])))
            transformed_streamlines = transform_to_affine(
                streamlines, trk_header, affine)

            aff = affine_from_fsl_mat_file(xfm, [vx, vy, vz],
                                           [r_vx, r_vy, r_vz])
            iflogger.info(aff)

            axcode = aff2axcodes(reg_affine)
            trk_header['voxel_order'] = axcode[0] + axcode[1] + axcode[2]

            final_streamlines = move_streamlines(transformed_streamlines, aff)
            trk_tracks = ((ii, None, None) for ii in final_streamlines)
            trk.write(out_filename, trk_tracks, trk_header)
            iflogger.info('Saving transformed Trackvis file as %s',
                          out_filename)
            iflogger.info('New TrackVis Header:')
            iflogger.info(trk_header)
        else:
            iflogger.info(
                'Applying transformation from scanner coordinates to %s',
                self.inputs.image_file)
            axcode = aff2axcodes(affine)
            trk_header['voxel_order'] = axcode[0] + axcode[1] + axcode[2]
            trk_header['vox_to_ras'] = affine
            transformed_streamlines = transform_to_affine(
                streamlines, trk_header, affine)
            trk_tracks = ((ii, None, None) for ii in transformed_streamlines)
            trk.write(out_filename, trk_tracks, trk_header)
            iflogger.info('Saving Trackvis file as %s', out_filename)
            iflogger.info('TrackVis Header:')
            iflogger.info(trk_header)
        return runtime
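
# Hypothetical usage sketch: the method above matches Nipype's MRTrix2TrackVis
# interface, so the import path below is an assumption; the file names are
# placeholders, not taken from the original source.
from nipype.interfaces.mrtrix.convert import MRTrix2TrackVis

trk_conv = MRTrix2TrackVis()
trk_conv.inputs.in_file = 'tracks.tck'       # MRTrix streamlines to convert
trk_conv.inputs.image_file = 'dwi.nii.gz'    # image supplying dims and voxel size
trk_conv.inputs.out_filename = 'tracks.trk'
trk_conv.run()
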
def rectify(tractogram_fn, out_fn):
    # Open file and extract streamlines
    streams, header = trackvis.read(tractogram_fn)
    streamlines = [s[0] for s in streams]

    # Get extrema
    beginnings = [s[0] for s in streamlines]
    #endings = [s[-1] for s in streamlines]

    # Run KMeans to cluster beginnings and endings both into 2 clusters
    kmeans_beginnings = KMeans(n_clusters=2).fit(beginnings)
    #kmeans_endings = KMeans(n_clusters = 2).fit(endings)

    beginnings_centers = kmeans_beginnings.cluster_centers_
    #endings_centers = kmeans_endings.cluster_centers_

    # Find the axis with the largest difference between the two cluster centers
    diff_X = abs(beginnings_centers[0][0] - beginnings_centers[1][0])
    diff_Y = abs(beginnings_centers[0][1] - beginnings_centers[1][1])
    diff_Z = abs(beginnings_centers[0][2] - beginnings_centers[1][2])

    # Assign the cluster with the largest value for this axis to be the seed cluster
    max_val = max(diff_X, diff_Y, diff_Z)
    if diff_X == max_val:
        max_index = 0
    elif diff_Y == max_val:
        max_index = 1
    else:
        max_index = 2
    if beginnings_centers[0][max_index] > beginnings_centers[1][max_index]:
        beginnings_beginnings = 0
        beginnings_endings = 1
    else:
        beginnings_beginnings = 1
        beginnings_endings = 0
    """
    # Assign the more populous cluster for beginnings to be the beginnings
    pred_beginnings = kmeans_beginnings.predict(beginnings)
    beginnings_sum = np.sum(pred_beginnings) / len(pred_beginnings)
    if beginnings_sum > 0.5:
        #beginnings_beginnings = beginnings_centers[1]
        #beginnings_endings = beginnings_centers[0]
        beginnings_beginnings = 1
        beginnings_endings = 0
    else:
        #beginnings_beginnings = beginnings_centers[0]
        #beginnings_endings = beginnings_centers[1]
        beginnings_beginnings = 0
        beginnings_endings = 1
    """

    # Assign the more populous cluster for endings to be the endings
    #pred_endings = kmeans_endings.predict(endings)
    #endings_sum = np.sum(pred_endings) / len(pred_endings)
    #if endings_sum > 0.5:
    #    endings_endings = 1
    #    endings_beginnings = 0
    #else:
    #    endings_endings = 0
    #    endings_beginnings = 1

    # For each streamline, predict the cluster of its beginning point and reverse it if appropriate
    new_streamlines = []
    for sl in streamlines:
        seed = np.array([sl[0]], dtype=float)
        result = kmeans_beginnings.predict(seed)

        # If assigned to beginnings_endings, reverse the streamline
        if result[0] == beginnings_endings:
            reversed_sl = sl[::-1]
            new_streamlines.append(reversed_sl)
        else:
            new_streamlines.append(sl)

    #fig = plt.figure()
    #ax = Axes3D(fig)
    #seeds = np.array([s[0] for s in new_streamlines])
    #x, y, z = seeds[:,0], seeds[:,1], seeds[:,2]
    #ax.scatter(list(x), list(y), list(z), c='#003cff')

    #seeds = np.array([s[-1] for s in new_streamlines])
    #x, y, z = seeds[:,0], seeds[:,1], seeds[:,2]
    #ax.scatter(list(x), list(y), list(z), c='#aabbff')
    #plt.show()

    new_streamlines = [(s, None, None) for s in new_streamlines]
    trackvis.write(out_fn, streamlines=new_streamlines, hdr_mapping=header)
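
# Hypothetical usage (added; file names are placeholders): reorient a bundle so
# that every streamline runs in the same direction.
if __name__ == '__main__':
    rectify('bundle.trk', 'bundle_rectified.trk')
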
# Open the file and extract streamlines
streams, header = trackvis.read(tractogram_fn)
streamlines = [sl[0] for sl in streams]

# Run quickbundles with chosen parameters
feature = ResampleFeature(nb_points=points_per_sl)
metric = AveragePointwiseEuclideanMetric(feature)
qb = QuickBundles(threshold=10.,
                  max_nb_clusters=max_num_centroids,
                  metric=metric)
clusters = qb.cluster(streamlines)

# Extract the centroids
centroids = [cluster.centroid for cluster in clusters]

# If not enough generated, fill with empty streamlines
diff = max_num_centroids - len(centroids)
if diff > 0:
    print(
        "Not enough centroids generated, so generating empty streamlines for padding."
    )
    empty_sl = np.zeros((points_per_sl, 3), dtype=np.float32)
    for num in range(diff):
        centroids.append(empty_sl)

# Convert to TrackVis format and write to file
centroids = [(c, None, None) for c in centroids]
out_fn = output_dir + '/' + subject + '_' + tract_name + '.trk'
trackvis.write(out_fn, centroids, header)
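
# Sanity-check sketch (added): the padded file should contain exactly
# max_num_centroids streamlines, each with points_per_sl points.
streams_out, _ = trackvis.read(out_fn)
assert len(streams_out) == max_num_centroids
assert all(len(s[0]) == points_per_sl for s in streams_out)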
Example #43
0
aff = np.eye(4)
aff[0, 0] = -1
img = nib.Nifti1Image(counts_trackvis.astype('int16'), aff)
nib.save(img, 'counts_trackvis.nii.gz')
img = nib.Nifti1Image(counts_nifti.astype('int16'), aff)
nib.save(img, 'counts_nifti.nii.gz')

hdr = empty_header()
hdr['voxel_size'] = (1, 1, 1)
hdr['voxel_order'] = 'las'
hdr['vox_to_ras'] = aff
hdr['dim'] = counts_nifti.shape

# Treat these streamlines like they are in trackvis format and save them
streamlines_trackvis = ((ii, None, None) for ii in streamlines)
write('slAsTrackvis.trk', streamlines_trackvis, hdr)

# Move these streamlines from nifti to trackvis format and save them
streamlines_nifti = ((ii + .5, None, None) for ii in streamlines)
write('slAsNifti.trk', streamlines_nifti, hdr)
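
# (added note) TrackVis places the origin at the corner of the first voxel,
# while NIfTI voxel coordinates refer to voxel centers; adding 0.5 to each
# coordinate converts center-based points to the corner-based convention.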

"""
Trackvis:
A------------
| C |   |   |
----B--------
|   |   |   |
-------------
|   |   |   |
------------D