def clearScalars( trkFile1, outputFile ):
  '''
  Clear all scalars of trkFile1 and save the result as outputFile.
  '''
  s = io.loadTrk( trkFile1 )
  tracks = s[0]
  tracksHeader = numpy.copy( s[1] )

  newTracks = []

  for tCounter, t in enumerate( tracks ):

    tCoordinates = t[0]
    tProperties = t[2]

    # clear the scalars of this track
    newTracks.append( ( tCoordinates, None, tProperties ) )

  # write the output file with cleared scalars
  io.saveTrk( outputFile, newTracks, tracksHeader, None, True )

  c.info( 'Cleared scalars from ' + trkFile1 + ' and saved as ' + outputFile )
def transform( tracks, matrix, outputFile=None, verbose=False, threadName='Global' ):
  '''
  Transform all points of the given tracks using a 4x4 matrix.

  Returns the transformed tracks, or writes them to outputFile if specified.
  '''
  # O(Tracks x Points)
  #
  # loop through all tracks and transform'em!!
  for t in xrange( len( tracks ) ):

    track = tracks[t]
    points = track[0]

    newPoints = numpy.copy( points )

    # loop through all points of the current track
    for p in xrange( len( points ) ):

      pointBefore = points[p]

      # expand to homogeneous coordinates, apply the matrix, then reduce again
      pointAfter = numpy.append( pointBefore, 1 )
      pointAfter = numpy.dot( matrix, pointAfter )
      pointAfter = numpy.delete( pointAfter, -1 )

      newPoints[p] = pointAfter

    # create a new track with the transformed points
    newTrack = ( newPoints, track[1], track[2] )

    # replace the old track with the newTrack
    tracks[t] = newTrack

  if not outputFile:
    return tracks
  else:
    # write it out to disk
    io.saveTrk( outputFile, tracks, None, None, True )
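# Usage sketch for transform() (the filenames are made up; io and numpy are
# the module-level imports used throughout this file): apply a pure
# translation of (10, 0, -5) to all fibers of a trk file.
def _transformExample():
  # homogeneous 4x4 matrix: identity rotation, translation (10, 0, -5)
  matrix = numpy.array( [[1., 0., 0., 10.],
                         [0., 1., 0., 0.],
                         [0., 0., 1., -5.],
                         [0., 0., 0., 1.]] )
  tracks = io.loadTrk( 'fibers.trk' )[0]
  # passing an outputFile makes transform() write the result to disk
  transform( tracks, matrix, 'fibers_translated.trk' )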
def sub(master, tracks, outputFile=None, verbose=False, threadName='Global'):
    '''
    Subtract tracks from master. Both parameters are
    nibabel.trackvis.streamlines objects.

    Calculation cost: O(M*N)

    Returns the result as a nibabel.trackvis.streamlines object or
    writes it to the file system if an outputFile is specified.
    '''
    masterSizeBefore = len(master)

    subtractedCount = 0

    # O(M*N)
    for t in xrange(masterSizeBefore):

        if subtractedCount == len(tracks):
            # no way we can subtract more.. stop the loop
            # (break instead of returning here, so the dirty markers below
            # still get filtered out)
            break

        c.debug(threadName + ': Looking for more tracks to subtract.. [Check #' + str(t) + '/' + str(masterSizeBefore) + ']', verbose)

        if master[t] == -1:
            # this fiber was already removed, skip to next one
            continue

        for u in xrange(len(tracks)):

            if tracks[u] == -1:
                # this fiber was already removed, skip to next one
                continue

            # compare fibers point by point
            if [p for points in master[t][0] for p in points] == [p for points in tracks[u][0] for p in points]:
                # fibers are equal, set them as dirty
                master[t] = -1
                tracks[u] = -1
                subtractedCount += 1
                # ... and jump out
                break

    # drop all fibers which were marked as dirty
    master = filter(lambda t: t != -1, master)

    if not outputFile:
        return master
    else:
        # write it out to disk
        io.saveTrk(outputFile, master, None, None, True)
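# Usage sketch for sub() (made-up filenames; io.loadTrk/io.saveTrk as used
# above): remove all fibers of one trk file from another one.
def _subExample():
    master = io.loadTrk('all_fibers.trk')[0]
    tracks = io.loadTrk('fibers_to_remove.trk')[0]
    # with an outputFile, sub() writes the difference directly to disk
    sub(master, tracks, 'difference.trk', verbose=True)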
def __init__(self, scalarName, volume):
    """
    Create a map action for the given scalar name, backed by the given
    volume file.
    """
    super(FyMapAction, self).__init__(scalarName)

    # load the volume and cache its geometry
    self._image = io.readImage(volume)
    self._imageHeader = self._image.header
    self._imageDimensions = self._image.shape[:3]
    self._imageSpacing = self._imageHeader.get_zooms()[:3]
def run( self, files ):
  '''
  Print header information for each of the given trk files.
  '''
  for f in files:

    header = io.loadTrkHeaderOnly( f )

    dimensions = header['dim']
    spacing = header['voxel_size']
    origin = header['origin']
    numberOfScalars = header['n_scalars']
    scalarNames = header['scalar_name']
    numberOfProperties = header['n_properties']
    propertyNames = header['property_name']
    vox2rasMatrix = header['vox_to_ras']
    voxelOrder = header['voxel_order']
    pad1 = header['pad1']
    pad2 = header['pad2']
    imageOrientation = header['image_orientation_patient']
    numberOfTracks = header['n_count']
    version = header['version']

    c.info( 'FILE: ' + f )
    c.info( '  TRACKVIS VERSION: ' + str( version ) )
    c.info( '  NUMBER OF TRACKS: ' + str( numberOfTracks ) )
    c.info( '  DIMENSIONS: ' + str( dimensions ) )
    c.info( '  SPACING: ' + str( spacing ) )
    c.info( '  ORIGIN: ' + str( origin ) )
    c.info( '  NUMBER OF SCALARS: ' + str( numberOfScalars ) )
    if numberOfScalars > 0:
      c.info( '    SCALARS: ' + str( scalarNames ) )
    c.info( '  NUMBER OF PROPERTIES: ' + str( numberOfProperties ) )
    if numberOfProperties > 0:
      c.info( '    PROPERTIES: ' + str( propertyNames ) )

    if version == 2:
      # only in trackvis v2
      c.info( '  VOX2RAS Matrix:' )
      c.info( '    ' + str( vox2rasMatrix[0] ) )
      c.info( '    ' + str( vox2rasMatrix[1] ) )
      c.info( '    ' + str( vox2rasMatrix[2] ) )
      c.info( '    ' + str( vox2rasMatrix[3] ) )
      c.info( '  VOXEL ORDER: ' + str( voxelOrder ) )

    #c.info( '  IMAGE ORIENTATION: ' )
    #c.info( '    ' + str( imageOrientation ) )
    #c.info( '  PADDING 1: ' + str( pad1 ) )
    #c.info( '  PADDING 2: ' + str( pad2 ) )

    print
def copyScalars( trkFile1, trkFile2, outputFile ):
  '''
  Copy scalars from trkFile1 to trkFile2.
  '''
  s = io.loadTrk( trkFile1 )
  s2 = io.loadTrk( trkFile2 )

  tracks = s[0]
  tracks2 = s2[0]
  tracksHeader = numpy.copy( s[1] )
  tracksHeader2 = numpy.copy( s2[1] )

  #if tracksHeader['n_count'] != tracksHeader2['n_count']:
  #  c.error( 'The track counts do not match!' )
  #  sys.exit( 2 )

  # now copy the scalar meta information
  tracksHeader2['n_scalars'] = tracksHeader['n_scalars']
  tracksHeader2['scalar_name'] = tracksHeader['scalar_name']

  newTracks2 = []

  for tCounter, t in enumerate( tracks ):

    tScalars = t[1]

    # copy scalars over
    #tracks2[tCounter][1] = numpy.copy( tScalars )
    newTracks2.append( ( tracks2[tCounter][0], tScalars[:], tracks2[tCounter][2] ) )

  # write trkFile2 with the updated scalars
  io.saveTrk( outputFile, newTracks2, tracksHeader2, None, True )

  c.info( 'Copied Scalars from ' + trkFile1 + ' to ' + trkFile2 + ' and saved as ' + outputFile )
def createSampleTrkFile( outputfile ):
  '''
  Create a sample track file which contains fibers with random points.
  The fiber coordinates match the sample volume from above.
  '''
  fibers = []

  numberOfFibers = NUMBER_OF_FIBERS

  print Colors.PURPLE + 'Creating a sample trkFile (' + Colors.CYAN + str( numberOfFibers ) + ' fibers' + Colors.PURPLE + '): ' + Colors.ORANGE + outputfile + Colors.PURPLE + '..' + Colors._CLEAR

  for f in range( numberOfFibers ):

    # from 3 to 10 points, randomly chosen
    numberOfPoints = random.randint( 3, 10 )

    # the point array reserves 3 components for each point (x,y,z)
    points = numpy.empty( shape=( numberOfPoints, 3 ), dtype=numpy.float32 )

    for p in range( numberOfPoints ):
      # create random points with coordinates in the range 0..(dimension*spacing - 1)
      # to match the sample volume's extent
      points[p] = [random.randint( 0, int( SAMPLE_VOLUME_DIMENSION_X * SAMPLE_VOLUME_SPACING_X ) - 1 ),
                   random.randint( 0, int( SAMPLE_VOLUME_DIMENSION_Y * SAMPLE_VOLUME_SPACING_Y ) - 1 ),
                   random.randint( 0, int( SAMPLE_VOLUME_DIMENSION_Z * SAMPLE_VOLUME_SPACING_Z ) - 1 )]

    fibers.append( ( points, None, None ) )

  # create an appropriate header
  header = eH()
  header['voxel_size'] = ( SAMPLE_VOLUME_SPACING_X, SAMPLE_VOLUME_SPACING_Y, SAMPLE_VOLUME_SPACING_Z )
  header['dim'] = ( SAMPLE_VOLUME_DIMENSION_X, SAMPLE_VOLUME_DIMENSION_Y, SAMPLE_VOLUME_DIMENSION_Z )

  # store the trk file
  io.saveTrk( outputfile, fibers, header, None, True )

  return numberOfFibers
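# Quick sanity check (sketch; the filename is arbitrary, and we assume the
# saved header carries the fiber count in 'n_count' as printed elsewhere in
# this codebase): create the sample file and compare the returned fiber
# count against the stored header.
def _createSampleTrkFileExample():
  count = createSampleTrkFile( 'sample.trk' )
  header = io.loadTrkHeaderOnly( 'sample.trk' )
  print 'created', count, 'fibers - header reports', header['n_count']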
from _common import FNNDSCConsole as c
from _common import FNNDSCFileIO as io
from _common import FNNDSCUtil as u

import sys
import numpy

# ENTRYPOINT
if __name__ == "__main__":

    track = sys.argv[1]
    trackId = int(sys.argv[2])
    volume = sys.argv[3]

    s = io.loadTrk(track)
    tracks = s[0]
    origHeader = s[1]

    image = io.readImage(volume)
    imageHeader = image.header
    imageDimensions = image.shape[:3]
    imageSpacing = imageHeader.get_zooms()

    singleTrack = tracks[trackId]
    coords = singleTrack[0]

    valueSum = 0
    length = 0
    _last = None
def run(self, input, output, mode, verbose, jobs):

    if len(input) < 2:
        c.error("Please specify at least two *.trk files as input!")
        sys.exit(2)

    if os.path.exists(output):
        # abort if file already exists
        c.error("File " + str(output) + " already exists..")
        c.error("Aborting..")
        sys.exit(2)

    jobs = int(jobs)

    if jobs < 1 or jobs > 32:
        jobs = 1

    # load 'master'
    mTracks = io.loadTrk(input[0])

    # copy the tracks and the header from the 'master'
    c.info("Master is " + input[0])
    outputTracks = mTracks[0]
    c.info("Number of tracks: " + str(len(outputTracks)))
    header = mTracks[1]

    # remove the first input
    input.pop(0)

    if mode == "add":

        #
        # ADD
        #
        for i in input:
            iTracks = io.loadTrk(i)

            # add the tracks
            c.debug("Adding " + str(len(iTracks[0])) + " tracks from " + i + " to master..", verbose)
            outputTracks = TrackvisCalcLogic.add(outputTracks, iTracks[0])

        c.debug("Number of output tracks after final addition: " + str(len(outputTracks)), verbose)

    elif mode == "sub":

        #
        # SUB
        #
        c.debug("Using " + str(jobs) + " threads..", verbose)

        mergedOutputTracks = outputTracks[:]

        for i in input:
            iTracks = io.loadTrk(i)

            # subtract the tracks
            c.info("Subtracting " + i + " (" + str(len(iTracks[0])) + " tracks) from master..")

            #
            # THREADED COMPONENT
            #
            numberOfThreads = jobs
            c.info("Splitting master into " + str(jobs) + " pieces..")
            splittedOutputTracks = u.split_list(mergedOutputTracks, numberOfThreads)

            # list of threads
            t = [None] * numberOfThreads
            # list of alive flags
            a = [None] * numberOfThreads
            # list of tempFiles
            f = [None] * numberOfThreads

            for n in xrange(numberOfThreads):
                # mark thread as alive
                a[n] = True
                # fire the thread and give it a filename based on the number
                tmpFile = tempfile.mkstemp(".trk", "t_calc")[1]
                f[n] = tmpFile
                t[n] = Process(
                    target=TrackvisCalcLogic.sub,
                    args=(splittedOutputTracks[n][:], iTracks[0][:], tmpFile, verbose, "Thread-" + str(n + 1)),
                )
                c.info("Starting Thread-" + str(n + 1) + "...")
                t[n].start()

            allDone = False

            while not allDone:
                time.sleep(1)
                for n in xrange(numberOfThreads):
                    a[n] = t[n].is_alive()

                if not any(a):
                    # if no thread is alive
                    allDone = True

            #
            # END OF THREADED COMPONENT
            #
            c.info("All Threads done!")

            c.info("Merging output..")
            # now read all the created tempFiles and merge'em to one
            # first thread output is the master here
            mergedOutputTracks = io.loadTrk(f[0])[0]

            for tmpFileNo in xrange(1, len(f)):
                tTracks = io.loadTrk(f[tmpFileNo])

                # accumulate into the merged result (adding everything to the
                # first chunk only would discard all but the last temp file)
                mergedOutputTracks = TrackvisCalcLogic.add(mergedOutputTracks, tTracks[0])

            c.info("Merging done!")

            # some stats
            c.info("Number of output tracks after final removal: " + str(len(mergedOutputTracks)))

        outputTracks = mergedOutputTracks

    # now save the outputTracks
    io.saveTrk(output, outputTracks, header)

    c.info("All done!")
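# The sub mode above follows a split/process/merge pattern. A standalone,
# single-process sketch of the same flow (u.split_list and TrackvisCalcLogic
# come from this codebase; the chunk count 4 is arbitrary):
def _splitMergeSketch(masterTracks, tracksToSubtract):
    chunks = u.split_list(masterTracks, 4)
    merged = []
    for chunk in chunks:
        # each chunk is subtracted independently, exactly like one worker
        # process does above; without an outputFile, sub() returns the result
        merged.extend(TrackvisCalcLogic.sub(chunk[:], tracksToSubtract[:]))
    return merged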
def makeMatrix(inputs, outputs, no_cortex):
    """
    Make 1/ADC, ADC, FA, FiberNumber, FiberLength, E1, E2, E3
    connectivity matrices.
    """
    s = io.loadTrk(outputs["fibers_final"])
    tracks = s[0]
    header = s[1]

    scalarNames = header["scalar_name"].tolist()

    # check if the segmentation is mapped
    try:
        scalarNames.index("segmentation")
    except ValueError:
        c.error(Colors.RED)
        for sName in scalarNames:
            if not sName:
                continue
            print sName
        return

    for i in inputs:
        if i == "fibers" or i == "segmentation" or i == "T1" or i == "b0":
            # we do not map these
            continue

    # for tCounter, t in enumerate( tracks ):

    try:
        labelIndex = scalarNames.index("segmentation")
        adcIndex = scalarNames.index("adc")
        faIndex = scalarNames.index("fa")
        e1Index = scalarNames.index("e1")
        e2Index = scalarNames.index("e2")
        e3Index = scalarNames.index("e3")
        lengthIndex = scalarNames.index("length")
    except ValueError:
        c.error("Not all scalars were found: segmentation, adc, fa, e1, e2, e3, length")
        sys.exit(2)

    m_fn = numpy.zeros([68, 68])
    m_fa = numpy.zeros([68, 68])
    m_adc = numpy.zeros([68, 68])
    m_adcinv = numpy.zeros([68, 68])
    m_len = numpy.zeros([68, 68])
    m_e1 = numpy.zeros([68, 68])
    m_e2 = numpy.zeros([68, 68])
    m_e3 = numpy.zeros([68, 68])

    # FreeSurfer cortical labels (right hemisphere 2xxx, left hemisphere 1xxx)
    fslabel_vol = [
        2012, 2019, 2032, 2014, 2020, 2018, 2027, 2028, 2003, 2024, 2017, 2026,
        2002, 2023, 2010, 2022, 2031, 2029, 2008, 2025, 2005, 2021, 2011, 2013,
        2007, 2016, 2006, 2033, 2009, 2015, 2001, 2030, 2034, 2035,
        1012, 1019, 1032, 1014, 1020, 1018, 1027, 1028, 1003, 1024, 1017, 1026,
        1002, 1023, 1010, 1022, 1031, 1029, 1008, 1025, 1005, 1021, 1011, 1013,
        1007, 1016, 1006, 1033, 1009, 1015, 1001, 1030, 1034, 1035,
    ]

    for tCounter, t in enumerate(tracks):

        tScalars = t[1]

        fa = numpy.mean(tScalars[:, faIndex])
        adc = numpy.mean(tScalars[:, adcIndex])
        e1 = numpy.mean(tScalars[:, e1Index])
        e2 = numpy.mean(tScalars[:, e2Index])
        e3 = numpy.mean(tScalars[:, e3Index])
        length = tScalars[0, lengthIndex]  # renamed from 'len' to avoid shadowing the builtin

        firstLabel = tScalars[0, labelIndex]
        lastLabel = tScalars[-1, labelIndex]

        try:
            fIndex = fslabel_vol.index(firstLabel)
            lIndex = fslabel_vol.index(lastLabel)
        except ValueError:
            continue

        print "found", firstLabel, lastLabel

        m_fn[fIndex, lIndex] += 1
        m_fa[fIndex, lIndex] += fa
        m_adc[fIndex, lIndex] += adc
        m_e1[fIndex, lIndex] += e1
        m_e2[fIndex, lIndex] += e2
        m_e3[fIndex, lIndex] += e3
        m_adcinv[fIndex, lIndex] += 1 / adc
        m_len[fIndex, lIndex] += length

    # symmetrize matrices
    m_fn = m_fn + m_fn.T - numpy.diag(m_fn.diagonal())
    m_fa = m_fa + m_fa.T - numpy.diag(m_fa.diagonal())
    m_adc = m_adc + m_adc.T - numpy.diag(m_adc.diagonal())
    m_e1 = m_e1 + m_e1.T - numpy.diag(m_e1.diagonal())
    m_e2 = m_e2 + m_e2.T - numpy.diag(m_e2.diagonal())
    m_e3 = m_e3 + m_e3.T - numpy.diag(m_e3.diagonal())
    m_adcinv = m_adcinv + m_adcinv.T - numpy.diag(m_adcinv.diagonal())
    m_len = m_len + m_len.T - numpy.diag(m_len.diagonal())

    # normalize matrices by the fiber count
    m_fa[:] /= m_fn[:]
    m_adc[:] /= m_fn[:]
    m_e1[:] /= m_fn[:]
    m_e2[:] /= m_fn[:]
    m_e3[:] /= m_fn[:]
    m_adcinv[:] /= m_fn[:]
    m_len[:] /= m_fn[:]

    m_fa = numpy.nan_to_num(m_fa)
    m_e1 = numpy.nan_to_num(m_e1)
    m_e2 = numpy.nan_to_num(m_e2)
    m_e3 = numpy.nan_to_num(m_e3)
    m_adc = numpy.nan_to_num(m_adc)
    m_adcinv = numpy.nan_to_num(m_adcinv)
    m_len = numpy.nan_to_num(m_len)

    # save as .mat and .csv
    sio.savemat(outputDirectory + "fibmap_all_cMatrix.mat",
                {"m_fiberNumber": m_fn,
                 "m_fa": m_fa,
                 "m_adc": m_adc,
                 "m_adcInverse": m_adcinv,
                 "m_fiberLength": m_len,
                 "m_e1": m_e1,
                 "m_e2": m_e2,
                 "m_e3": m_e3})

    numpy.savetxt(outputDirectory + "fibmap_fibernumber_cMatrix.csv", m_fn, delimiter=",")
    numpy.savetxt(outputDirectory + "fibmap_fa_cMatrix.csv", m_fa, delimiter=",")
    numpy.savetxt(outputDirectory + "fibmap_e1_cMatrix.csv", m_e1, delimiter=",")
    numpy.savetxt(outputDirectory + "fibmap_e2_cMatrix.csv", m_e2, delimiter=",")
    numpy.savetxt(outputDirectory + "fibmap_e3_cMatrix.csv", m_e3, delimiter=",")
    numpy.savetxt(outputDirectory + "fibmap_adc_cMatrix.csv", m_adc, delimiter=",")
    numpy.savetxt(outputDirectory + "fibmap_adcinv_cMatrix.csv", m_adcinv, delimiter=",")
    numpy.savetxt(outputDirectory + "fibmap_fiberlength_cMatrix.csv", m_len, delimiter=",")

    c.info("Connectivity matrices generated and stored.")
save_image(img, volFile)

# trk file
fibers = []

# 2,5,6
# 3,5,7
# 2,6,7
# 8,7,3
# 9,5,4
points = np.array([[2, 5, 6], [3, 5, 7], [2, 6, 7], [8, 7, 3], [9, 5, 4]], dtype=np.float32)
fibers.append((points, None, None))

io.saveTrk(trkFile, fibers, None, None, True)

# with fyborg
fyborg.fyborg(trkFile, mappedTrkFile, [fyborg.FyMapAction('test', volFile)])

# now validate
s = io.loadTrk(mappedTrkFile)
tracks = s[0]
origHeader = s[1]
scalars = tracks[0][1]

print scalars[0], '==', testArr[2][5][6]
print scalars[1], '==', testArr[3][5][7]
print scalars[2], '==', testArr[2][6][7]
print scalars[3], '==', testArr[8][7][3]
print scalars[4], '==', testArr[9][5][4]
def run( self, input, output, matrix, jobs ):
  '''
  Transform the input trk file using a 4x4 matrix file and save the result.
  '''
  if os.path.exists( output ):
    # abort if file already exists
    c.error( 'File ' + str( output ) + ' already exists..' )
    c.error( 'Aborting..' )
    sys.exit( 2 )

  if not os.path.isfile( matrix ):
    # abort if the matrix does not exist
    c.error( 'Matrix-File ' + str( matrix ) + ' does not exist..' )
    c.error( 'Aborting..' )
    sys.exit( 2 )

  jobs = int( jobs )

  if jobs < 1 or jobs > 32:
    jobs = 1

  # read
  c.info( 'Loading ' + input + '..' )
  t = io.loadTrk( input )
  tracks = t[0]
  header = t[1]
  #.. copy the current header
  newHeader = numpy.copy( header )

  # print old matrix in header
  #
  # WARNING: this matrix is actually never used by TrackVis (see email from Ruopeng).
  # We still modify it to keep it in sync with the transformations which we apply point wise.
  #
  # note: the header is a numpy structured array, so we check for the field
  # by name ('vox_to_ras' only exists in trackvis v2 headers)
  if 'vox_to_ras' in header.dtype.names:
    oldMatrix = header['vox_to_ras']
    c.info( 'Old transformation matrix:' )
    c.info( '    ' + str( oldMatrix[0] ) )
    c.info( '    ' + str( oldMatrix[1] ) )
    c.info( '    ' + str( oldMatrix[2] ) )
    c.info( '    ' + str( oldMatrix[3] ) )

  #
  # load our transformation Matrix
  #
  newMatrix = numpy.loadtxt( matrix, float, '#', ' ' )

  #
  # THREADED COMPONENT
  #
  numberOfThreads = jobs
  c.info( 'Splitting the input into ' + str( jobs ) + ' pieces..' )
  splittedOutputTracks = u.split_list( tracks, numberOfThreads )

  # list of threads
  t = [None] * numberOfThreads

  # list of alive flags
  a = [None] * numberOfThreads

  # list of tempFiles
  f = [None] * numberOfThreads

  for n in xrange( numberOfThreads ):
    # mark thread as alive
    a[n] = True
    # fire the thread and give it a filename based on the number
    tmpFile = tempfile.mkstemp( '.trk', 't_transform' )[1]
    f[n] = tmpFile
    t[n] = Process( target=TrackvisTransformLogic.transform, args=( splittedOutputTracks[n][:], newMatrix, tmpFile, False, 'Thread-' + str( n + 1 ) ) )
    c.info( "Starting Thread-" + str( n + 1 ) + "..." )
    t[n].start()

  allDone = False

  while not allDone:
    time.sleep( 1 )
    for n in xrange( numberOfThreads ):
      a[n] = t[n].is_alive()

    if not any( a ):
      # if no thread is alive
      allDone = True

  #
  # END OF THREADED COMPONENT
  #
  c.info( "All Threads done!" )

  c.info( "Merging output.." )
  # now read all the created tempFiles and merge'em to one
  # first thread output is the master here
  tracks = io.loadTrk( f[0] )[0]

  for tmpFileNo in xrange( 1, len( f ) ):
    tTracks = io.loadTrk( f[tmpFileNo] )

    # accumulate into the merged result, so no chunk gets lost
    tracks = TrackvisCalcLogic.add( tracks, tTracks[0] )

  c.info( "Merging done!" )

  #
  # replace the matrix in the header with a transformed one even if it will never be used by TrackVis
  #
  if 'vox_to_ras' in header.dtype.names:
    result = numpy.dot( oldMatrix, newMatrix )
    c.info( 'New transformation matrix:' )
    c.info( '    ' + str( result[0] ) )
    c.info( '    ' + str( result[1] ) )
    c.info( '    ' + str( result[2] ) )
    c.info( '    ' + str( result[3] ) )
    newHeader['vox_to_ras'] = result

  # write
  c.info( 'Saving ' + output + '..' )
  io.saveTrk( output, tracks, newHeader )

  c.info( 'All done!' )
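# The matrix file parsed by numpy.loadtxt above is a plain-text, space
# delimited 4x4 matrix with optional '#' comment lines. A round-trip sketch
# (the filename and the translation values are made up):
def _writeExampleMatrix( filename ):
  with open( filename, 'w' ) as f:
    f.write( '# translation by 10 along x\n' )
    f.write( '1 0 0 10\n' )
    f.write( '0 1 0 0\n' )
    f.write( '0 0 1 0\n' )
    f.write( '0 0 0 1\n' )
  # same call signature as used in run() above
  return numpy.loadtxt( filename, float, '#', ' ' )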
def fyborg( trkFile, outputTrkFile, actions, *args ):

  if not actions:
    c.error( "We gotta do something.." )
    return

  showDebug = 'debug' in args
  singleThread = 'singlethread' in args

  c.debug( "trkFile:" + str( trkFile ), showDebug )
  c.debug( "outputTrkFile:" + str( outputTrkFile ), showDebug )
  c.debug( "args:" + str( args ), showDebug )

  # load trk file
  s = io.loadTrk( trkFile )
  tracks = s[0]
  origHeader = s[1]
  tracksHeader = numpy.copy( s[1] )

  numberOfScalars = origHeader['n_scalars']
  scalars = origHeader['scalar_name'].tolist()
  numberOfTracks = origHeader['n_count']

  # show some file information
  printTrkInfo( tracksHeader, trkFile )

  # grab the scalarNames
  scalarNames = []
  for a in actions:
    if a.scalarName() != FyAction.NoScalar:
      scalarNames.append( a.scalarName() )

  # increase the number of scalars
  tracksHeader['n_scalars'] += len( scalarNames )

  # .. attach the new scalar names
  for i in range( len( scalarNames ) ):
    tracksHeader['scalar_name'][numberOfScalars + i] = scalarNames[i]

  #
  # THREADED COMPONENT
  #
  if singleThread:
    numberOfThreads = 1
  else:
    numberOfThreads = multiprocessing.cpu_count()

  c.info( 'Splitting master into ' + str( numberOfThreads ) + ' pieces..' )
  splittedOutputTracks = u.split_list( tracks[:], numberOfThreads )

  # list of threads
  t = [None] * numberOfThreads

  # list of alive flags
  a = [None] * numberOfThreads

  # list of tempFiles
  f = [None] * numberOfThreads

  for n in xrange( numberOfThreads ):

    # configure actions
    __actions = []
    for act in actions:
      __actions.append( act )

    # mark thread as alive
    a[n] = True
    # fire the thread and give it a filename based on the number
    tmpFile = tempfile.mkstemp( '.trk', 'fyborg' )[1]
    f[n] = tmpFile
    t[n] = Process( target=fyborgLooper_, args=( splittedOutputTracks[n][:], tracksHeader, tmpFile, __actions, showDebug, n + 1 ) )
    c.info( "Starting Thread-" + str( n + 1 ) + "..." )
    t[n].start()

  allDone = False

  while not allDone:
    time.sleep( 1 )
    for n in xrange( numberOfThreads ):
      a[n] = t[n].is_alive()

    if not any( a ):
      # if no thread is alive
      allDone = True

  #
  # END OF THREADED COMPONENT
  #
  c.info( "All Threads done!" )

  #
  # Merging stage
  #
  c.info( "Merging tracks.." )

  outputTracks = []
  # now read all the created tempFiles and merge'em to one
  for tmpFileNo in xrange( 0, len( f ) ):
    tTracks = io.loadTrk( f[tmpFileNo] )

    # add them
    outputTracks.extend( tTracks[0] )

  c.info( "Merging done!" )

  io.saveTrk( outputTrkFile, outputTracks, tracksHeader, None, True )

  c.info( "All done!" )
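# A minimal custom action sketch (hypothetical class; the FyAction interface
# assumed here - scalarName(), scalarPerFiber(), scalarPerCoordinate() and
# validate() - is the one exercised by fyborgLooper_ below): attach a
# constant scalar value to every coordinate of every fiber.
class FyConstantAction( FyAction ):

  def __init__( self, scalarName, value ):
    super( FyConstantAction, self ).__init__( scalarName )
    self._value = value

  def scalarPerFiber( self, uniqueId, coords, scalars ):
    # no per-fiber scalar, defer to the per-coordinate mapping
    return FyAction.NoScalar

  def scalarPerCoordinate( self, uniqueId, x, y, z ):
    return self._value

  def validate( self, uniqueId ):
    # never invalidate a fiber
    return True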
def validateMapping( volumefile, trkfile, radius=0, map_intermediate=True ):
  '''
  Check if a trk file has correctly mapped scalar values from a volume file.

  If radius is > 0, take it into account by looking for the most common value
  in a sphere around the original point. This only happens for start and end
  points.

  If map_intermediate is active, the points between the end points are
  validated as well, but never using the radius.

  Returns TRUE if everything is fine, FALSE if there were errors.
  '''
  # load the mapped trk file
  s = io.loadTrk( trkfile )
  volume = io.readImage( volumefile )
  imageHeader = volume.header
  image_size = volume.shape[:3]

  # grab the tracks
  tracks = s[0]

  # pad the image with zeros
  image = ap.pad( volume, radius, 'constant', constant_values=( 0 ) )

  # any errors?
  any_errors = False

  # incorporate spacing
  spacing = imageHeader.get_zooms()[:3]

  # .. and loop through the tracks
  for t in tracks:

    points = t[0] # the points of this fiber track
    scalars = t[1] # the mapped scalars

    for index, p in enumerate( points ):

      current_point = [ int( a / b ) for a, b in zip( [p[0], p[1], p[2]], spacing )]
      #print 'ORIG', volume[current_point[0], current_point[1], current_point[2]]

      is_first_point = ( index == 0 )
      is_last_point = ( index == len( points ) - 1 )

      if not map_intermediate and not is_first_point and not is_last_point:
        # intermediate points were not mapped, so they should carry a zero
        real_scalar = 0.0
      else:
        # here we check for the neighborhood if radius > 0
        if radius > 0 and ( is_first_point or is_last_point ):
          # neighborhood search!
          r = radius
          a, b, c = current_point

          # crop the image according to the neighborhood look-up
          # since we zero-padded the image, we don't need boundary checks here
          min_x = a - r
          max_x = a + r + 1
          min_y = b - r
          max_y = b + r + 1
          min_z = c - r
          max_z = c + r + 1

          cropped_image = numpy.asarray( image[min_x + r:max_x + r, min_y + r:max_y + r, min_z + r:max_z + r] )

          # create a sphere mask
          x, y, z = numpy.ogrid[0:2 * r + 1, 0:2 * r + 1, 0:2 * r + 1]
          mask = ( x - r ) ** 2 + ( y - r ) ** 2 + ( z - r ) ** 2 <= r * r # 3d sphere mask

          # apply the mask
          masked_container = cropped_image[mask]

          # throw away all zeros (0)
          masked_container = masked_container[numpy.nonzero( masked_container )]

          # find the most frequent label in the masked container
          from collections import Counter

          # by default, we use the original one
          mostFrequentLabel = volume[a, b, c]

          if len( masked_container ) != 0:
            counter = Counter( masked_container )
            all_labels = counter.most_common()
            best_match_label = counter.most_common( 1 )
            original_pos = [i for i, v in enumerate( all_labels ) if v[0] == mostFrequentLabel]
            if not original_pos or all_labels[original_pos[0]][1] != best_match_label[0][1]:
              # the original label appears less often than the new best_match_label
              # in this case, we use the new best matched label
              mostFrequentLabel = best_match_label[0][0]
            # we don't need an else here since the original label is already set

          real_scalar = mostFrequentLabel

        else:
          # simple mapping without radius incorporation; make sure we stay inside the volume
          real_scalar = volume[min( current_point[0], image_size[0] - 1 ), min( current_point[1], image_size[1] - 1 ), min( current_point[2], image_size[2] - 1 )]

      if scalars is None:
        mapped_scalar = -1
      else:
        mapped_scalar = scalars[index][0]

      # now check if the mapped scalar from the trk file matches the real scalar
      compare = ( mapped_scalar == real_scalar )
      if compare:
        compare = Colors.GREEN + 'OK'
      else:
        compare = Colors.RED + 'WRONG!!!'
        any_errors = True

      print Colors.PURPLE + 'Probing ' + Colors.CYAN + str( current_point ) + Colors.PURPLE + ' for scalar.. SHOULD BE: ' + Colors.CYAN + str( real_scalar ) + Colors.PURPLE + ' WAS: ' + Colors.CYAN + str( mapped_scalar ) + Colors.PURPLE + ' ... ' + str( compare ) + Colors._CLEAR

  # return TRUE if everything went fine and FALSE if there were errors
  return not any_errors
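# The neighborhood lookup above relies on an ogrid-based boolean sphere mask.
# A standalone sketch of just that part (independent of any trk machinery):
def _sphereMaskSketch( r ):
  x, y, z = numpy.ogrid[0:2 * r + 1, 0:2 * r + 1, 0:2 * r + 1]
  mask = ( x - r ) ** 2 + ( y - r ) ** 2 + ( z - r ) ** 2 <= r * r
  # mask is a (2r+1)^3 boolean cube which is True inside the sphere
  print mask.sum(), 'of', mask.size, 'voxels are inside the sphere'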
def run(self, masterFile, inputFiles, outputDirectory, spacing, dimensions, likefreesurfer, nii):
    '''
    Performs the equalization.
    '''
    # sanity checks
    outputDirectory = os.path.normpath(outputDirectory)

    # prepare the output directory
    if os.path.exists(outputDirectory):
        c.error('The output directory already exists!')
        c.error('Aborting..')
        sys.exit(2)

    # create the output directory
    os.mkdir(outputDirectory)

    # MASTER
    masterFile = os.path.normpath(masterFile)
    # read the master
    master = io.readImage(masterFile)
    c.info('MASTER IMAGE: ' + str(masterFile))

    # INPUTS
    for i in range(len(inputFiles)):
        inputFiles[i] = os.path.normpath(inputFiles[i])
        c.info('INPUT IMAGE ' + str(i + 1) + ': ' + str(inputFiles[i]))

    # print more info
    c.info('OUTPUT DIRECTORY: ' + str(outputDirectory))

    if likefreesurfer:
        spacing = '1,1,1'
        dimensions = '256,256,256'

    if spacing != 'no':
        c.info('SET SPACINGS: ' + str(spacing))

    if dimensions != 'no':
        c.info('SET DIMENSIONS: ' + str(dimensions))

    # re-sample master to obtain an isotropic dataset
    master = self.aniso2iso(master, spacing, dimensions)
    masterFileBasename = os.path.split(masterFile)[1]
    masterFileBasenameWithoutExt = os.path.splitext(masterFileBasename)[0]

    if not nii:
        masterOutputFileName = os.path.join(outputDirectory, masterFileBasename)
    else:
        masterOutputFileName = os.path.join(outputDirectory, masterFileBasenameWithoutExt) + '.nii'
    io.saveImage(masterOutputFileName, master)

    # equalize all images to the master
    for i in range(len(inputFiles)):
        currentInputFile = inputFiles[i]

        c.info('Equalizing ' + str(currentInputFile) + ' to ' + str(masterFile) + '...')

        # load the image
        currentImage = io.readImage(currentInputFile)
        currentImageHeader = currentImage.header
        c.info('    old spacing: ' + str(currentImageHeader.get_zooms()))
        c.info('    old dimensions: ' + str(currentImage.shape[:3]))

        # now resample
        resampledImage = resampler.resample_img2img(currentImage, master)

        # .. and save it
        currentInputFileBasename = os.path.split(currentInputFile)[1]
        currentInputFileBasenameWithoutExt = os.path.splitext(currentInputFileBasename)[0]
        if not nii:
            outputFileName = os.path.join(outputDirectory, currentInputFileBasename)
        else:
            outputFileName = os.path.join(outputDirectory, currentInputFileBasenameWithoutExt)
        savedImage = io.saveImage(outputFileName, resampledImage)
        #c.info( '    new spacing: ' + str( savedImageHeader.get_zooms() ) )
        c.info('    new dimensions: ' + str(savedImage.shape[:3]))

    c.info('All done!')
def fyborgLooper_( tracks, tracksHeader, outputTrkFile, actions, showDebug, threadNumber ):

  import numpy

  numberOfTracks = len( tracks )

  # the buffer for the new tracks
  newTracks = []

  # now loop through the tracks
  for tCounter, t in enumerate( tracks ):

    # some debug stats
    c.debug( 'Thread-' + str( threadNumber ) + ': Processing ' + str( tCounter + 1 ) + '/' + str( numberOfTracks ), showDebug )

    # generate a unique ID for this track
    uniqueId = str( threadNumber ) + str( tCounter )

    tCoordinates = t[0]
    tScalars = t[1]

    # buffer for fiber scalars
    _fiberScalars = {}

    # first round: mapping per fiber
    # .. execute each action and buffer the return value (scalar)
    for a in actions:
      value = a.scalarPerFiber( uniqueId, tCoordinates, tScalars )
      _fiberScalars[a.scalarName()] = value

    #
    # Coordinate Loop
    #

    # buffer for coordinate scalars
    scalars = []

    # second round: mapping per coordinate
    for cCounter, coords in enumerate( tCoordinates ):

      _coordScalars = {}
      # the actual buffer for ordered fiber and coord scalars merged together
      _mergedScalars = []

      # .. execute each action and buffer the return value (scalar)
      for a in actions:
        value = a.scalarPerCoordinate( uniqueId, coords[0], coords[1], coords[2] ) # pass x,y,z
        _coordScalars[a.scalarName()] = value

      # now merge the old scalars and the fiber and coord scalars
      # this preserves the ordering of the configured actions
      if tScalars is not None:
        _mergedScalars.extend( tScalars[cCounter] )

      for a in actions:
        value = _fiberScalars[a.scalarName()]
        if value != FyAction.NoScalar:
          _mergedScalars.append( value )
        else:
          # no fiber scalar, check if there is a coord scalar
          value = _coordScalars[a.scalarName()]
          if value != FyAction.NoScalar:
            _mergedScalars.append( value )

      # attach the merged scalars of this coordinate
      scalars.append( _mergedScalars )

    # validate the fiber using the actions' validate methods
    validator = []
    for a in actions:
      validator.append( a.validate( uniqueId ) )

    if all( validator ):
      # this is a valid fiber
      # .. add the new track with the coordinates, the new scalar array and the properties
      newScalars = numpy.asarray( scalars )
      newTracks.append( ( t[0], newScalars, t[2] ) )

  # save everything
  io.saveTrk( outputTrkFile, newTracks, tracksHeader, None, True )