Code example #1
File: fy.py Project: FNNDSC/fyborg
  def filtering( self, inputs, outputs, length, cortex_only ):
    '''
    Filter the mapped fibers.
    '''

    # check if we have all required input data
    # we need at least: 
    #  - outputs['fibers_mapped'] == Track file in T1 space with mapped scalars
    if not os.path.exists( outputs['fibers_mapped'] ):
      c.error( Colors.RED + 'Could not find ' + Colors.YELLOW + outputs['fibers_mapped'] + Colors.RED + ' but we really need it to start with stage 4!!' + Colors._CLEAR )
      sys.exit( 2 )

    # find the order of the mapped scalars
    header = io.loadTrkHeaderOnly( outputs['fibers_mapped'] )
    scalars = list( header['scalar_name'] )

    # split the length range
    length = length.split( ' ' )
    min_length = int( length[0] )
    max_length = int( length[1] )

    # length filtering

    c.info( Colors.YELLOW + '  Filtering ' + Colors.PURPLE + 'fiber length' + Colors.YELLOW + ' to be ' + Colors.PURPLE + '>' + str( min_length ) + ' and <' + str( max_length ) + Colors.YELLOW + ' for ' + Colors.PURPLE + os.path.split( outputs['fibers_mapped'] )[1] + Colors.YELLOW + ' and store as ' + Colors.PURPLE + os.path.split( outputs['fibers_mapped_length_filtered'] )[1] + Colors.YELLOW + '!' + Colors._CLEAR )
    fyborg.fyborg( outputs['fibers_mapped'], outputs['fibers_mapped_length_filtered'], [FyFilterLengthAction( scalars.index( 'length' ), min_length, max_length )] )

    header = io.loadTrkHeaderOnly( outputs['fibers_mapped_length_filtered'] )
    new_count = header['n_count']

    c.info( Colors.YELLOW + '  Number of tracks after ' + Colors.PURPLE + 'length filtering' + Colors.YELLOW + ': ' + str( new_count ) + Colors.YELLOW + Colors._CLEAR )

    if cortex_only:

      # special cortex filtering

      c.info( Colors.YELLOW + '  Filtering for ' + Colors.PURPLE + 'valid cortex structures' + Colors.YELLOW + ' in ' + Colors.PURPLE + os.path.split( outputs['fibers_mapped_length_filtered'] )[1] + Colors.YELLOW + ' and store as ' + Colors.PURPLE + os.path.split( outputs['fibers_mapped_length_filtered_cortex_only'] )[1] + Colors.YELLOW + '!' + Colors._CLEAR )
      c.info( Colors.PURPLE + '    Conditions for valid fibers:' + Colors._CLEAR )
      c.info( Colors.PURPLE + '    1.' + Colors.YELLOW + ' The fiber track has to pass through the cerebral white matter. (Label values: ' + Colors.PURPLE + '[2, 41]' + Colors.YELLOW + ')' + Colors._CLEAR )
      c.info( Colors.PURPLE + '    2.' + Colors.YELLOW + ' The fiber track shall only touch sub-cortical structures not more than ' + Colors.PURPLE + '5 times' + Colors.YELLOW + '. (Label values: ' + Colors.PURPLE + '[10, 49, 16, 28, 60, 4, 43]' + Colors.YELLOW + ')' + Colors._CLEAR )
      c.info( Colors.PURPLE + '    3.' + Colors.YELLOW + ' The track shall not pass through the corpus callosum (Labels: ' + Colors.PURPLE + '[251, 255]' + Colors.YELLOW + ') and end in the same hemisphere (Labels: ' + Colors.PURPLE + '[1000-1035]' + Colors.YELLOW + ' for left, ' + Colors.PURPLE + '[2000-2035]' + Colors.YELLOW + ' for right).' + Colors._CLEAR )

      fyborg.fyborg( outputs['fibers_mapped_length_filtered'], outputs['fibers_mapped_length_filtered_cortex_only'], [FyFilterCortexAction( scalars.index( 'segmentation' ) )] )

      header = io.loadTrkHeaderOnly( outputs['fibers_mapped_length_filtered_cortex_only'] )
      new_count = header['n_count']

      c.info( Colors.YELLOW + '  Number of tracks after ' + Colors.PURPLE + 'cortex filtering' + Colors.YELLOW + ': ' + str( new_count ) + Colors.YELLOW + Colors._CLEAR )

      c.info( Colors.YELLOW + '  Copied filtered tracks from ' + Colors.PURPLE + os.path.split( outputs['fibers_mapped_length_filtered_cortex_only'] )[1] + Colors.YELLOW + ' to ' + Colors.PURPLE + os.path.split( outputs['fibers_final'] )[1] + Colors.YELLOW + '!' + Colors._CLEAR )
      shutil.copyfile( outputs['fibers_mapped_length_filtered_cortex_only'], outputs['fibers_final'] )

    else:

      c.info( Colors.YELLOW + '  Info: ' + Colors.PURPLE + 'Cortical _and_ sub-cortical structures ' + Colors.YELLOW + 'will be included..' + Colors._CLEAR )

      c.info( Colors.YELLOW + '  Copied filtered tracks from ' + Colors.PURPLE + os.path.split( outputs['fibers_mapped_length_filtered'] )[1] + Colors.YELLOW + ' to ' + Colors.PURPLE + os.path.split( outputs['fibers_final'] )[1] + Colors.YELLOW + '!' + Colors._CLEAR )
      shutil.copyfile( outputs['fibers_mapped_length_filtered'], outputs['fibers_final'] )
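
A note on the filter action used above: FyFilterLengthAction is defined elsewhere in fyborg and is not shown on this page. Purely as an illustration, a minimal length filter over TrackVis-style tracks (each track treated as a (coordinates, scalars) pair with the mapped 'length' value stored per point) might look like the following sketch; the name filter_by_length and its interface are assumptions, not the project's API.

def filter_by_length( tracks, length_index, min_length, max_length ):
  # keep only tracks whose mapped 'length' scalar lies strictly between
  # min_length and max_length, matching the '>min and <max' log message above
  filtered = []
  for coordinates, scalars in tracks:
    # the 'length' scalar is constant along a track, so the first point is enough
    track_length = scalars[0, length_index]
    if min_length < track_length < max_length:
      filtered.append( ( coordinates, scalars ) )
  return filtered
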
Code example #2
  def setupGrid( self, matrix ):

    if self.__test:
      self.__rows = 101
      self.__cols = 101

      self.__gridWidget = GridView( self, self.__rows, self.__cols, False )
      self.__layout.addWidget( self.__gridWidget, 0, 0 )

      b_overwriteSpectralValue = True
      maxEnergy = 255 / 3
      automaton = C_spectrum_CAM_RGB( maxQuanta=maxEnergy )
      automaton.component_add( 'R', maxEnergy / 3, b_overwriteSpectralValue )
      automaton.component_add( 'G', maxEnergy / 3, b_overwriteSpectralValue )
      automaton.component_add( 'B', maxEnergy / 3, b_overwriteSpectralValue )
      automaton.updateRule_changeAmount(self.__updateAmount)

      world = C_CAE( np.array( ( self.__rows, self.__cols ) ), automaton )
      world.verbosity_set( 1 )
      arr_world = np.zeros( ( self.__rows, self.__cols ) )
      arr_world[0, 0] = 1
      arr_world[50, 50] = maxEnergy / 3 + 1
      arr_world[100, 100] = maxEnergy / 3 * 2 + 1

    elif matrix:
      maxEnergy = 255

      arr_worldRaw = np.loadtxt( matrix, float, '#', '\t' )
      arr_world = misc.arr_normalize( arr_worldRaw, scale=maxEnergy )

      self.__rows, self.__cols = arr_world.shape

      self.__gridWidget = GridView( self, self.__rows, self.__cols, False )
      self.__layout.addWidget( self.__gridWidget, 0, 0 )

      b_overwriteSpectralValue = True
      automaton = C_spectrum_CAM_RGB( maxQuanta=maxEnergy )
      automaton.component_add( 'R', maxEnergy / 3, b_overwriteSpectralValue )
      automaton.component_add( 'G', maxEnergy / 3, b_overwriteSpectralValue )
      automaton.component_add( 'B', maxEnergy / 3, b_overwriteSpectralValue )
      print "Update amount = %d" % self.__updateAmount
      automaton.updateRule_changeAmount(self.__updateAmount)

      world = C_CAE( np.array( ( self.__rows, self.__cols ) ), automaton )
      world.verbosity_set( 1 )

    else:
      c.error( 'No test mode and no matrix..' )
      sys.exit()

    print arr_world
    world.initialize( arr_world )

    self.__world = world
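
misc.arr_normalize is a helper that is not part of this listing. Assuming it linearly rescales an array into the range [0, scale], a minimal numpy stand-in could be:

import numpy as np

def arr_normalize( arr, scale=255 ):
  # hypothetical stand-in for misc.arr_normalize:
  # linearly rescale 'arr' into [0, scale]
  arr = np.asarray( arr, dtype=float )
  span = arr.max() - arr.min()
  if span == 0:
    return np.zeros_like( arr )
  return ( arr - arr.min() ) / span * scale
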
Code example #3
File: fy.py Project: FNNDSC/fyborg
  def mapping( self, inputs, outputs, radius ):
    '''
    Map all detected scalar volumes to each fiber.
    '''
    # check if we have all required input data
    # we need at least: 
    #  - outputs['fibers'] == Track file in T1 space
    #  - outputs['segmentation'] == Label Map
    if not os.path.exists( outputs['fibers'] ):
      c.error( Colors.RED + 'Could not find ' + Colors.YELLOW + outputs['fibers'] + Colors.RED + ' but we really need it to start with stage 3!!' + Colors._CLEAR )
      sys.exit( 2 )
    if not os.path.exists( outputs['segmentation'] ):
      c.error( Colors.RED + 'Could not find ' + Colors.YELLOW + outputs['segmentation'] + Colors.RED + ' but we really need it to start with stage 3!!' + Colors._CLEAR )
      sys.exit( 2 )

    actions = []

    for i in inputs:

      if i == 'fibers' or i == 'segmentation' or i == 'T1' or i == 'b0':
        # we do not map these
        continue

      if not os.path.exists( outputs[i + '_T1_space'] ):
        # we can't map this since we didn't find the file
        continue

      # for normal scalars: append it to the actions
      actions.append( FyMapAction( i, outputs[i + '_T1_space'] ) )

      c.info( Colors.YELLOW + '  Configuring mapping of ' + Colors.PURPLE + os.path.split( outputs[i + '_T1_space'] )[1] + Colors.YELLOW + ' to ' + Colors.PURPLE + os.path.split( outputs['fibers'] )[1] + Colors.YELLOW + '!' + Colors._CLEAR )

    # now the segmentation with the lookaround radius
    actions.append( FyRadiusMapAction( 'segmentation', outputs['segmentation'], radius ) )
    c.info( Colors.YELLOW + '  Configuring mapping of ' + Colors.PURPLE + os.path.split( outputs['segmentation'] )[1] + Colors.YELLOW + ' to ' + Colors.PURPLE + os.path.split( outputs['fibers'] )[1] + Colors.YELLOW + '!' + Colors._CLEAR )

    # and also the fiber length
    actions.append( FyLengthAction() )
    c.info( Colors.YELLOW + '  Configuring mapping of ' + Colors.PURPLE + 'fiber length' + Colors.YELLOW + ' to ' + Colors.PURPLE + os.path.split( outputs['fibers'] )[1] + Colors.YELLOW + '!' + Colors._CLEAR )

    # run, forest, run!!
    c.info( Colors.YELLOW + '  Performing configured mapping for ' + Colors.PURPLE + os.path.split( outputs['fibers'] )[1] + Colors.YELLOW + ' and storing as ' + Colors.PURPLE + os.path.split( outputs['fibers_mapped'] )[1] + Colors.YELLOW + ' (~ 30 minutes)!' + Colors._CLEAR )
    if self.__debug:
      fyborg.fyborg( outputs['fibers'], outputs['fibers_mapped'], actions, 'debug' )
    else:
      fyborg.fyborg( outputs['fibers'], outputs['fibers_mapped'], actions )
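
The Fy*Action classes configured above are defined elsewhere in fyborg; the fyborg() listing at the end of this page only shows that every action exposes a scalarName() method. As an illustration of the pattern only, a hypothetical action computing the total fiber length could look like this sketch (the scalarPerFiber hook is an assumption, not the project's API):

import numpy as np

class LengthAction( object ):

  def scalarName( self ):
    # name of the scalar this action attaches to each track
    return 'length'

  def scalarPerFiber( self, coordinates ):
    # hypothetical hook: total length as the sum of Euclidean
    # distances between consecutive points of a fiber
    diffs = np.diff( np.asarray( coordinates, dtype=float ), axis=0 )
    return float( np.sqrt( ( diffs ** 2 ).sum( axis=1 ) ).sum() )
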
Code example #4
File: equalizer.py Project: khmermega/scripts
    def run(self, masterFile, inputFiles, outputDirectory, spacing, dimensions,
            likefreesurfer, nii):
        '''
        Performs the equalization.
        '''

        # sanity checks
        outputDirectory = os.path.normpath(outputDirectory)
        # prepare the output directory
        if os.path.exists(outputDirectory):
            c.error('The output directory already exists!')
            c.error('Aborting..')
            sys.exit(2)
        # create the output directory
        os.mkdir(outputDirectory)

        # MASTER
        masterFile = os.path.normpath(masterFile)
        # read the master
        master = io.readImage(masterFile)
        c.info('MASTER IMAGE: ' + str(masterFile))

        # INPUTS
        for i in range(len(inputFiles)):
            inputFiles[i] = os.path.normpath(inputFiles[i])
            c.info('INPUT IMAGE ' + str(i + 1) + ': ' + str(inputFiles[i]))

        # print more info
        c.info('OUTPUT DIRECTORY: ' + str(outputDirectory))

        if likefreesurfer:
            spacing = '1,1,1'
            dimensions = '256,256,256'

        if spacing != 'no':
            c.info('SET SPACINGS: ' + str(spacing))

        if dimensions != 'no':
            c.info('SET DIMENSIONS: ' + str(dimensions))

        # re-sample master to obtain an isotropic dataset
        master = self.aniso2iso(master, spacing, dimensions)
        masterFileBasename = os.path.split(masterFile)[1]
        masterFileBasenameWithoutExt = os.path.splitext(masterFileBasename)[0]

        if not nii:
            masterOutputFileName = os.path.join(outputDirectory,
                                                masterFileBasename)
        else:
            masterOutputFileName = os.path.join(
                outputDirectory, masterFileBasenameWithoutExt) + '.nii'
        io.saveImage(masterOutputFileName, master)

        # equalize all images to the master
        for i in range(len(inputFiles)):
            currentInputFile = inputFiles[i]

            c.info('Equalizing ' + str(currentInputFile) + ' to ' +
                   str(masterFile) + "...")

            # load the image
            currentImage = io.readImage(currentInputFile)
            currentImageHeader = currentImage.header
            c.info('    old spacing: ' + str(currentImageHeader.get_zooms()))
            c.info('    old dimensions: ' + str(currentImage.shape[:3]))

            # now resample
            resampledImage = resampler.resample_img2img(currentImage, master)

            # .. and save it
            currentInputFileBasename = os.path.split(currentInputFile)[1]
            currentInputFileBasenameWithoutExt = os.path.splitext(
                currentInputFileBasename)[0]
            if not nii:
                outputFileName = os.path.join(outputDirectory,
                                              currentInputFileBasename)
            else:
                outputFileName = os.path.join(
                    outputDirectory, currentInputFileBasenameWithoutExt)

            savedImage = io.saveImage(outputFileName, resampledImage)
            #c.info( '    new spacing: ' + str( savedImageHeader.get_zooms() ) )
            c.info('    new dimensions: ' + str(savedImage.shape[:3]))

        c.info('All done!')
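
io.readImage, io.saveImage and resampler.resample_img2img are project helpers that are not shown here. Assuming NIfTI inputs via nibabel, the core of the resampling step could be approximated with scipy as in this sketch (shape matching by zooming only; the real helper may also honor the affines):

import nibabel
from scipy import ndimage

def resample_img2img(image, master):
    # hypothetical sketch: zoom each axis of 'image' so its shape
    # matches the first three dimensions of 'master'
    factors = [float(m) / float(s)
               for m, s in zip(master.shape[:3], image.shape[:3])]
    data = ndimage.zoom(image.get_fdata(), factors, order=1)
    return nibabel.Nifti1Image(data, master.affine, master.header)
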
Code example #5
File: t_calc.py Project: FNNDSC/scripts
    def run(self, input, output, mode, verbose, jobs):

        if len(input) < 2:
            c.error("Please specify at least two *.trk files as input!")
            sys.exit(2)

        if os.path.exists(output):
            # abort if file already exists
            c.error("File " + str(output) + " already exists..")
            c.error("Aborting..")
            sys.exit(2)

        jobs = int(jobs)

        if jobs < 1 or jobs > 32:
            jobs = 1

        # load 'master'
        mTracks = io.loadTrk(input[0])

        # copy the tracks and the header from the 'master'
        c.info("Master is " + input[0])
        outputTracks = mTracks[0]
        c.info("Number of tracks: " + str(len(outputTracks)))
        header = mTracks[1]

        # remove the first input
        input.pop(0)

        if mode == "add":
            #
            # ADD
            #

            for i in input:
                iTracks = io.loadTrk(i)

                # add the tracks
                c.debug("Adding " + str(len(iTracks[0])) + " tracks from " + i + " to master..", verbose)
                outputTracks = TrackvisCalcLogic.add(outputTracks, iTracks[0])

            c.debug("Number of output tracks after final addition: " + str(len(outputTracks)), verbose)

        elif mode == "sub":
            #
            # SUB
            #

            c.debug("Using " + str(jobs) + " threads..", verbose)

            mergedOutputTracks = outputTracks[:]

            for i in input:
                iTracks = io.loadTrk(i)

                # subtract the tracks
                c.info("Subtracting " + i + " (" + str(len(iTracks[0])) + " tracks) from master..")

                #
                # THREADED COMPONENT
                #
                numberOfThreads = jobs
                c.info("Splitting master into " + str(jobs) + " pieces..")
                splittedOutputTracks = u.split_list(mergedOutputTracks, numberOfThreads)

                # list of threads
                t = [None] * numberOfThreads

                # list of alive flags
                a = [None] * numberOfThreads

                # list of tempFiles
                f = [None] * numberOfThreads

                for n in xrange(numberOfThreads):
                    # mark thread as alive
                    a[n] = True
                    # fire the thread and give it a filename based on the number
                    tmpFile = tempfile.mkstemp(".trk", "t_calc")[1]
                    f[n] = tmpFile
                    t[n] = Process(
                        target=TrackvisCalcLogic.sub,
                        args=(splittedOutputTracks[n][:], iTracks[0][:], tmpFile, verbose, "Thread-" + str(n + 1)),
                    )
                    c.info("Starting Thread-" + str(n + 1) + "...")
                    t[n].start()

                allDone = False

                while not allDone:

                    time.sleep(1)

                    for n in xrange(numberOfThreads):

                        a[n] = t[n].is_alive()

                    if not any(a):
                        # if no thread is alive
                        allDone = True

                #
                # END OF THREADED COMPONENT
                #
                c.info("All Threads done!")

                c.info("Merging output..")
                # now read all the created tempFiles and merge them into one;
                # the first thread's output is the starting point
                mergedOutputTracks = io.loadTrk(f[0])[0]
                for tmpFileNo in xrange(1, len(f)):
                    tTracks = io.loadTrk(f[tmpFileNo])

                    # accumulate instead of overwriting, so earlier pieces are kept
                    mergedOutputTracks = TrackvisCalcLogic.add(mergedOutputTracks, tTracks[0])

                c.info("Merging done!")

            # some stats
            c.info("Number of output tracks after final removal: " + str(len(mergedOutputTracks)))
            outputTracks = mergedOutputTracks

        # now save the outputTracks
        io.saveTrk(output, outputTracks, header)

        c.info("All done!")
Code example #6
File: makeMatrix.py Project: FNNDSC/fyborg
def makeMatrix(inputs, outputs, no_cortex):
    """
  Make 1/ADC, ADC, FA, FiberNumber, FiberLength, E1, E2, E3 connectivity matrices.
  """

    s = io.loadTrk(outputs["fibers_final"])
    tracks = s[0]
    header = s[1]

    scalarNames = header["scalar_name"].tolist()
    # grab the indices of the scalars we need; abort if any is missing
    try:
        labelIndex = scalarNames.index("segmentation")
        adcIndex = scalarNames.index("adc")
        faIndex = scalarNames.index("fa")
        e1Index = scalarNames.index("e1")
        e2Index = scalarNames.index("e2")
        e3Index = scalarNames.index("e3")
        lengthIndex = scalarNames.index("length")
    except ValueError:
        c.error("Not all required scalars were found: segmentation, adc, fa, e1, e2, e3, length")
        sys.exit(2)

    m_fn = numpy.zeros([68, 68])
    m_fa = numpy.zeros([68, 68])
    m_adc = numpy.zeros([68, 68])
    m_adcinv = numpy.zeros([68, 68])
    m_len = numpy.zeros([68, 68])
    m_e1 = numpy.zeros([68, 68])
    m_e2 = numpy.zeros([68, 68])
    m_e3 = numpy.zeros([68, 68])

    fslabel_vol = [
        2012, 2019, 2032, 2014, 2020, 2018, 2027, 2028, 2003, 2024, 2017,
        2026, 2002, 2023, 2010, 2022, 2031, 2029, 2008, 2025, 2005, 2021,
        2011, 2013, 2007, 2016, 2006, 2033, 2009, 2015, 2001, 2030, 2034,
        2035, 1012, 1019, 1032, 1014, 1020, 1018, 1027, 1028, 1003, 1024,
        1017, 1026, 1002, 1023, 1010, 1022, 1031, 1029, 1008, 1025, 1005,
        1021, 1011, 1013, 1007, 1016, 1006, 1033, 1009, 1015, 1001, 1030,
        1034, 1035,
    ]

    for tCounter, t in enumerate(tracks):

        tCoordinates = t[0]
        tScalars = t[1]

        fa = numpy.mean(tScalars[:, faIndex])
        adc = numpy.mean(tScalars[:, adcIndex])
        e1 = numpy.mean(tScalars[:, e1Index])
        e2 = numpy.mean(tScalars[:, e2Index])
        e3 = numpy.mean(tScalars[:, e3Index])
        length = tScalars[0, lengthIndex]  # 'length' avoids shadowing the built-in len()

        firstLabel = tScalars[0, labelIndex]
        lastLabel = tScalars[-1, labelIndex]

        try:
            fIndex = fslabel_vol.index(firstLabel)
            lIndex = fslabel_vol.index(lastLabel)
        except ValueError:
            # an endpoint label is not monitored; skip this track
            continue

        print "found", firstLabel, lastLabel

        m_fn[fIndex, lIndex] += 1
        m_fa[fIndex, lIndex] += fa
        m_adc[fIndex, lIndex] += adc
        m_e1[fIndex, lIndex] += e1
        m_e2[fIndex, lIndex] += e2
        m_e3[fIndex, lIndex] += e3
        m_adcinv[fIndex, lIndex] += 1 / adc
        m_len[fIndex, lIndex] += length

    # symmetrize matrices
    m_fn = m_fn + m_fn.T - numpy.diag(m_fn.diagonal())
    m_fa = m_fa + m_fa.T - numpy.diag(m_fa.diagonal())
    m_adc = m_adc + m_adc.T - numpy.diag(m_adc.diagonal())
    m_e1 = m_e1 + m_e1.T - numpy.diag(m_e1.diagonal())
    m_e2 = m_e2 + m_e2.T - numpy.diag(m_e2.diagonal())
    m_e3 = m_e3 + m_e3.T - numpy.diag(m_e3.diagonal())
    m_adcinv = m_adcinv + m_adcinv.T - numpy.diag(m_adcinv.diagonal())
    m_len = m_len + m_len.T - numpy.diag(m_len.diagonal())

    # normalize matrices
    m_fa[:] /= m_fn[:]
    m_adc[:] /= m_fn[:]
    m_e1[:] /= m_fn[:]
    m_e2[:] /= m_fn[:]
    m_e3[:] /= m_fn[:]
    m_adcinv[:] /= m_fn[:]
    m_len[:] /= m_fn[:]
    m_fa = numpy.nan_to_num(m_fa)
    m_e1 = numpy.nan_to_num(m_e1)
    m_e2 = numpy.nan_to_num(m_e2)
    m_e3 = numpy.nan_to_num(m_e3)
    m_adc = numpy.nan_to_num(m_adc)
    m_adcinv = numpy.nan_to_num(m_adcinv)
    m_len = numpy.nan_to_num(m_len)

    # save as .mat and .csv
    # NOTE: 'outputDirectory' is not defined inside makeMatrix(); it is
    # assumed to be provided by the enclosing scope.
    sio.savemat(
        outputDirectory + "fibmap_all_cMatrix.mat",
        {
            "m_fiberNumber": m_fn,
            "m_fa": m_fa,
            "m_adc": m_adc,
            "m_adcInverse": m_adcinv,
            "m_fiberLength": m_len,
            "m_e1": m_e1,
            "m_e2": m_e2,
            "m_e3": m_e3,
        },
    )

    numpy.savetxt(outputDirectory + "fibmap_fibernumber_cMatrix.csv", m_fn, delimiter=",")
    numpy.savetxt(outputDirectory + "fibmap_fa_cMatrix.csv", m_fa, delimiter=",")
    numpy.savetxt(outputDirectory + "fibmap_e1_cMatrix.csv", m_e1, delimiter=",")
    numpy.savetxt(outputDirectory + "fibmap_e2_cMatrix.csv", m_e2, delimiter=",")
    numpy.savetxt(outputDirectory + "fibmap_e3_cMatrix.csv", m_e3, delimiter=",")
    numpy.savetxt(outputDirectory + "fibmap_adc_cMatrix.csv", m_adc, delimiter=",")
    numpy.savetxt(outputDirectory + "fibmap_adcinv_cMatrix.csv", m_adcinv, delimiter=",")
    numpy.savetxt(outputDirectory + "fibmap_fiberlength_cMatrix.csv", m_len, delimiter=",")

    c.info("Connectivity matrices generated and stored.")
Code example #7
File: t_transform.py Project: khmermega/scripts
  def run( self, input, output, matrix, jobs ):
    '''
    Apply a 4x4 transformation matrix to all tracks of a TrackVis file.
    '''

    if os.path.exists( output ):
      # abort if file already exists
      c.error( 'File ' + str( output ) + ' already exists..' )
      c.error( 'Aborting..' )
      sys.exit( 2 )

    if not os.path.isfile( matrix ):
      # abort if the matrix does not exist
      c.error( 'Matrix-File ' + str( matrix ) + ' does not exist..' )
      c.error( 'Aborting..' )
      sys.exit( 2 )

    jobs = int( jobs )

    if jobs < 1 or  jobs > 32:
      jobs = 1

    # read
    c.info( 'Loading ' + input + '..' )

    t = io.loadTrk( input )
    tracks = t[0]
    header = t[1]
    #.. copy the current header
    newHeader = numpy.copy( header )

    # print old matrix in header
    # 
    # WARNING: this matrix is actually never used by TrackVis (see email from Ruopeng).
    # We still modify it to keep it in sync with the transformations which we apply point wise.
    #
    if hasattr( header, 'vox_to_ras' ):
      oldMatrix = header['vox_to_ras']
      c.info( 'Old transformation matrix:' )
      c.info( '    ' + str( oldMatrix[0] ) )
      c.info( '    ' + str( oldMatrix[1] ) )
      c.info( '    ' + str( oldMatrix[2] ) )
      c.info( '    ' + str( oldMatrix[3] ) )

    #
    # load our transformation Matrix
    #
    newMatrix = numpy.loadtxt( matrix, float, '#', ' ' )


    #
    # THREADED COMPONENT
    #
    numberOfThreads = jobs
    c.info( 'Splitting the input into ' + str( jobs ) + ' pieces..' )
    splittedOutputTracks = u.split_list( tracks, numberOfThreads )

    # list of threads
    t = [None] * numberOfThreads

    # list of alive flags
    a = [None] * numberOfThreads

    # list of tempFiles
    f = [None] * numberOfThreads

    for n in xrange( numberOfThreads ):
      # mark thread as alive
      a[n] = True
      # fire the thread and give it a filename based on the number
      tmpFile = tempfile.mkstemp( '.trk', 't_transform' )[1]
      f[n] = tmpFile
      t[n] = Process( target=TrackvisTransformLogic.transform, args=( splittedOutputTracks[n][:], newMatrix, tmpFile, False, 'Thread-' + str( n + 1 ) ) )
      c.info( "Starting Thread-" + str( n + 1 ) + "..." )
      t[n].start()

    allDone = False

    while not allDone:

      time.sleep( 1 )

      for n in xrange( numberOfThreads ):

        a[n] = t[n].is_alive()

      if not any( a ):
        # if no thread is alive
        allDone = True

    #
    # END OF THREADED COMPONENT
    #
    c.info( "All Threads done!" )

    c.info( "Merging output.." )
    # now read all the created tempFiles and merge them into one;
    # the first thread's output is the starting point
    tracks = io.loadTrk( f[0] )[0]
    for tmpFileNo in xrange( 1, len( f ) ):
      tTracks = io.loadTrk( f[tmpFileNo] )

      # accumulate instead of overwriting, so earlier pieces are kept
      tracks = TrackvisCalcLogic.add( tracks, tTracks[0] )

    c.info( "Merging done!" )

    #
    # replace the matrix in the header with a transformed one even if it will never be used by TrackVis
    #
    if hasattr( header, 'vox_to_ras' ):
      result = numpy.dot( oldMatrix, newMatrix )
      c.info( 'New transformation matrix:' )
      c.info( '    ' + str( result[0] ) )
      c.info( '    ' + str( result[1] ) )
      c.info( '    ' + str( result[2] ) )
      c.info( '    ' + str( result[3] ) )
      newHeader['vox_to_ras'] = result

    # write
    c.info( 'Saving ' + output + '..' )
    io.saveTrk( output, tracks, newHeader )

    c.info( 'All done!' )
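
TrackvisTransformLogic.transform is not listed on this page; applying a 4x4 matrix to every point of a track amounts to a multiplication in homogeneous coordinates, roughly like this sketch (names are illustrative):

import numpy

def transform_points( points, matrix ):
  # append a homogeneous 1 to each (x, y, z) point, multiply by the
  # 4x4 matrix and drop the homogeneous coordinate again
  points = numpy.asarray( points, dtype=float )
  ones = numpy.ones( ( points.shape[0], 1 ) )
  homogeneous = numpy.hstack( [points, ones] )
  transformed = numpy.dot( homogeneous, numpy.asarray( matrix ).T )
  return transformed[:, :3]
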
Code example #8
File: fy.py Project: FNNDSC/fyborg
  def roi_extract( self, inputs, outputs ):
    '''
    Extract one fiber ROI per mapped segmentation label, as .trk and .nii files.
    '''
    # check if we have all required input data
    # we need at least: 
    #  - outputs['fibers_final'] == Track file in T1 space with mapped scalars
    if not os.path.exists( outputs['fibers_final'] ):
      c.error( Colors.RED + 'Could not find ' + Colors.YELLOW + outputs['fibers_final'] + Colors.RED + ' but we really need it to start with stage 6!!' + Colors._CLEAR )
      sys.exit( 2 )

    s = io.loadTrk( outputs['fibers_final'] )
    tracks = s[0]
    header = s[1]

    scalarNames = header['scalar_name'].tolist()
    labels = {}

    # check if the segmentation is mapped
    try:
      seg_index = scalarNames.index( 'segmentation' )
    except ValueError:
      c.error( Colors.RED + 'Could not find ' + Colors.YELLOW + 'segmentation' + Colors.RED + ' as a mapped scalar but we really need it!' + Colors._CLEAR )
      sys.exit( 2 )

    # create the roi subfolder
    if not os.path.exists( outputs['roi'] ):
      os.mkdir( outputs['roi'] )

    # parse the color table
    lut = colortable.freesurfer.split( '\n' )
    colors = {}
    for color in lut:
      if not color or color[0] == '#':
        continue

      splitted_line = color.split( ' ' )
      splitted_line = filter( None, splitted_line )
      colors[splitted_line[0]] = splitted_line[1]


    # loop through tracks
    for i, t in enumerate( tracks ):

      tCoordinates = t[0]
      tScalars = t[1]

      # grab the scalars for each point
      for scalar in tScalars:

        # but only the label value
        label_value = str( int( scalar[seg_index] ) )

        if label_value not in labels:

          labels[label_value] = []

        if i not in labels[label_value]:
          # store the unique fiber id for this label
          labels[label_value].append( i )

    # now loop through all detected labels
    for l in labels:

      new_tracks = []

      for t_id in labels[l]:
        # grab the fiber + scalars
        current_fiber = tracks[t_id]

        new_tracks.append( current_fiber )

      # now store the trk file
      trk_outputfile = l + '_' + colors[l] + '.trk'
      nii_outputfile = l + '_' + colors[l] + '.nii'
      c.info( Colors.YELLOW + '  Creating fiber ROI ' + Colors.PURPLE + trk_outputfile + Colors.YELLOW + '!' + Colors._CLEAR )
      io.saveTrk( os.path.join( outputs['roi'], trk_outputfile ), new_tracks, header, None, True )

      # also create a roi label volume for this label value
      c.info( Colors.YELLOW + '  Creating NII ROI ' + Colors.PURPLE + nii_outputfile + Colors.YELLOW + '!' + Colors._CLEAR )
      cmd = 'ss;'
      cmd += 'chb-fsstable;'
      cmd += 'mri_binarize --i ' + outputs['segmentation'] + ' --o ' + os.path.join( outputs['roi'], nii_outputfile ) + ' --match ' + l + ' --binval ' + l + ';'
      self.__logger.debug( cmd )
      sp = subprocess.Popen( ["/bin/bash", "-i", "-c", cmd], stdout=sys.stdout )
      sp.communicate()
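
A side note on the index built in the loop above: the same label-to-fiber-id mapping could be written more compactly with collections.defaultdict and sets (same content; only the ordering of the collected fiber ids differs):

import collections

# 'tracks' and 'seg_index' as in the listing above
labels = collections.defaultdict( set )
for i, t in enumerate( tracks ):
  for scalar in t[1]:
    # collect the unique fiber ids per segmentation label
    labels[str( int( scalar[seg_index] ) )].add( i )
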
Code example #9
File: fy.py Project: FNNDSC/fyborg
  def connectivity( self, inputs, outputs, cortex_only ):
    '''
    Generate connectivity matrices using mapped values.
    '''
    # check if we have all required input data
    # we need at least: 
    #  - outputs['fibers_final'] == Track file in T1 space with mapped scalars
    if not os.path.exists( outputs['fibers_final'] ):
      c.error( Colors.RED + 'Could not find ' + Colors.YELLOW + outputs['fibers_final'] + Colors.RED + ' but we really need it to start with stage 5!!' + Colors._CLEAR )
      sys.exit( 2 )

    s = io.loadTrk( outputs['fibers_final'] )
    tracks = s[0]
    header = s[1]

    scalarNames = header['scalar_name'].tolist()
    matrix = {}
    indices = {}

    # check if the segmentation is mapped
    try:
      indices['segmentation'] = scalarNames.index( 'segmentation' )
    except ValueError:
      c.error( Colors.RED + 'Could not find ' + Colors.YELLOW + 'segmentation' + Colors.RED + ' as a mapped scalar but we really need it!' + Colors._CLEAR )
      sys.exit( 2 )

    if cortex_only:
      # cortical labels only (left and right hemisphere)
      labels = [2012, 2019, 2032, 2014, 2020, 2018, 2027, 2028, 2003, 2024, 2017, 2026,
                2002, 2023, 2010, 2022, 2031, 2029, 2008, 2025, 2005, 2021, 2011, 2013,
                2007, 2016, 2006, 2033, 2009, 2015, 2001, 2030, 2034, 2035, 1012, 1019,
                1032, 1014, 1020, 1018, 1027, 1028, 1003, 1024, 1017, 1026, 1002, 1023,
                1010, 1022, 1031, 1029, 1008, 1025, 1005, 1021, 1011, 1013, 1007, 1016,
                1006, 1033, 1009, 1015, 1001, 1030, 1034, 1035]
    else:
      # cortical plus sub-cortical labels
      labels = [2012, 2019, 2032, 2014, 2020, 2018, 2027, 2028, 2003, 2024, 2017, 2026,
                2002, 2023, 2010, 2022, 2031, 2029, 2008, 2025, 2005, 2021, 2011, 2013,
                2007, 2016, 2006, 2033, 2009, 2015, 2001, 2030, 2034, 2035, 49, 50, 51,
                52, 58, 53, 54, 1012, 1019, 1032, 1014, 1020, 1018, 1027, 1028, 1003,
                1024, 1017, 1026, 1002, 1023, 1010, 1022, 1031, 1029, 1008, 1025, 1005,
                1021, 1011, 1013, 1007, 1016, 1006, 1033, 1009, 1015, 1001, 1030, 1034,
                1035, 10, 11, 12, 13, 26, 17, 18, 16]

    c.info( Colors.YELLOW + '  Getting ready to create connectivity matrices for the following labels: ' + Colors.PURPLE + str( labels ) + Colors._CLEAR )
    c.info( Colors.YELLOW + '  Note: Mapped scalar values along the points will be averaged for each fiber track.' + Colors._CLEAR )

    # create matrices for the attached scalars
    for i, s in enumerate( scalarNames ):

      if i >= header['n_scalars']:
        break

      if not s or s == 'segmentation':
        continue

      # this is a scalar value for which a matrix will be created
      matrix[s] = np.zeros( [len( labels ), len( labels )] )
      indices[s] = scalarNames.index( s )
      c.info( Colors.YELLOW + '  Preparing matrix (' + Colors.PURPLE + '[' + str( len( labels ) ) + 'x' + str( len( labels ) ) + ']' + Colors.YELLOW + ') for ' + Colors.PURPLE + s + Colors.YELLOW + ' values!' + Colors._CLEAR )

      if s == 'adc':
        s = 'inv_adc'
        matrix[s] = np.zeros( [len( labels ), len( labels )] )
        indices[s] = scalarNames.index( 'adc' )
        c.info( Colors.YELLOW + '  Preparing matrix (' + Colors.PURPLE + '[' + str( len( labels ) ) + 'x' + str( len( labels ) ) + ']' + Colors.YELLOW + ') for ' + Colors.PURPLE + s + Colors.YELLOW + ' values!' + Colors._CLEAR )


    # always create one for the fiber counts
    matrix['fibercount'] = np.zeros( [len( labels ), len( labels )] )
    indices['fibercount'] = 0
    c.info( Colors.YELLOW + '  Preparing matrix (' + Colors.PURPLE + '[' + str( len( labels ) ) + 'x' + str( len( labels ) ) + ']' + Colors.YELLOW + ') for ' + Colors.PURPLE + 'fibercount' + Colors.YELLOW + ' values!' + Colors._CLEAR )

    c.info( Colors.YELLOW + '  Analyzing fibers of ' + Colors.PURPLE + os.path.split( outputs['fibers_final'] )[1] + Colors.YELLOW + '..' + Colors._CLEAR )
    for tCounter, t in enumerate( tracks ):

      tCoordinates = t[0]
      tScalars = t[1]

      # find the segmentation labels for the start and end points
      start_label = tScalars[0, indices['segmentation']]
      end_label = tScalars[-1, indices['segmentation']]

      try:
        # now grab the index of the labels in our label list
        start_index = labels.index( start_label )
        end_index = labels.index( end_label )
      except ValueError:
        # this label is not monitored, so ignore this track
        continue

      # loop through all different scalars
      for m in matrix:

        # calculate the mean for each track
        value = np.mean( tScalars[:, indices[m]] )

        if m == 'inv_adc':
          # invert the mean ADC to obtain 1/ADC
          value = 1 / value
        elif m == 'fibercount':
          # in the case of fibercount, add 1
          value = 1

        # store value in the matrix
        matrix[m][start_index, end_index] += value
        if not start_index == end_index:
          matrix[m][end_index, start_index] += value

    # fiber loop is done, all values are stored
    # now normalize the matrices
    np.seterr( all='ignore' ) # avoid div by 0 warnings
    cbar = None
    for m in matrix:
      if not m == 'fibercount':
        # normalize it
        matrix[m][:] /= matrix['fibercount']
        matrix[m] = np.nan_to_num( matrix[m] )

      # store the matrix
      c.info( Colors.YELLOW + '  Storing ' + Colors.PURPLE + m + Colors.YELLOW + ' connectivity matrix as ' + Colors.PURPLE + os.path.split( outputs['matrix_' + m] )[1] + Colors.YELLOW + '!' + Colors._CLEAR )
      np.savetxt( outputs['matrix_' + m], matrix[m], delimiter='\t' )

      # store a picture
      picture_path = os.path.splitext( os.path.split( outputs['matrix_' + m] )[1] )[0] + '.png'
      c.info( Colors.YELLOW + '  Generating ' + Colors.PURPLE + m + ' image' + Colors.YELLOW + ' as ' + Colors.PURPLE + picture_path + Colors.YELLOW + '!' + Colors._CLEAR )
      img = plot.imshow( matrix[m], interpolation='nearest' )
      img.set_cmap( 'jet' )
      img.set_norm( LogNorm() )
      img.axes.get_xaxis().set_visible( False )
      img.axes.get_yaxis().set_visible( False )
      if not cbar:
        cbar = plot.colorbar()
      cbar.set_label( m )
      cbar.set_ticks( [] )
      plot.savefig( os.path.join( os.path.split( outputs['matrix_' + m] )[0], picture_path ) )

    np.seterr( all='warn' ) # reactivate div by 0 warnings

    # now store the matlab version as well
    c.info( Colors.YELLOW + '  Storing ' + Colors.PURPLE + 'matlab data bundle' + Colors.YELLOW + ' containing ' + Colors.PURPLE + 'all matrices' + Colors.YELLOW + ' as ' + Colors.PURPLE + os.path.split( outputs['matrix_all'] )[1] + Colors.YELLOW + '!' + Colors._CLEAR )
    scipy.io.savemat( outputs['matrix_all'], matrix, oned_as='row' )
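
The normalization pass above divides each accumulated matrix by the fiber counts, turning per-connection sums into means, and nan_to_num cleans up the 0/0 entries of label pairs that no fiber connects. A toy example of the same arithmetic:

import numpy as np

fibercount = np.array( [[2., 0.],
                        [0., 0.]] )
fa_sum = np.array( [[1.2, 0.],
                    [0., 0.]] )

np.seterr( all='ignore' )  # silence the 0/0 warnings, as above
fa_mean = np.nan_to_num( fa_sum / fibercount )
np.seterr( all='warn' )

print( fa_mean )
# [[ 0.6  0. ]
#  [ 0.   0. ]]
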
Code example #10
File: fy.py Project: FNNDSC/fyborg
  parser.add_argument( '-co', '--cortex_only', action='store_true', dest='cortex_only', help='Perform filtering for cortex specific analysis and skip sub-cortical structures.' )
  parser.add_argument( '-s', '--stage', action='store', dest='stage', default=0, type=int, help='Start with a specific stage while skipping the ones before. E.g. --stage 3 directly starts the mapping without preprocessing, --stage 4 starts with the filtering' )
  parser.add_argument( '-overwrite', '--overwrite', action='store_true', dest='overwrite', help='Overwrite any existing output. DANGER!!' )
  parser.add_argument( '-v', '--verbose', action='store_true', dest='verbose', help='Show verbose output' )

  # always show the help if no arguments were specified
  if len( sys.argv ) == 1:
    parser.print_help()
    sys.exit( 1 )

  options = parser.parse_args()

  # validate the inputs here
  if not os.path.isdir( options.input ):

    c.error( Colors.RED + 'Could not find the input directory! Specify a valid directory using -i $PATH.' + Colors._CLEAR )
    sys.exit( 2 )

  if os.path.exists( options.output ) and int( options.stage ) == 0:

    if not options.overwrite:
      c.error( Colors.RED + 'The output directory exists! Add --overwrite to erase previous content!' + Colors._CLEAR )
      c.error( Colors.RED + 'Or use --stage > 2 to start with a specific stage which re-uses the previous content..' + Colors._CLEAR )
      sys.exit( 2 )
    else:
      # silently delete the existing output
      shutil.rmtree( options.output )

  if options.stage > 0 and not os.path.exists( options.output ):
    # we start with a specific stage so we need the output stuff
    c.error( Colors.RED + 'The output directory does not exist! We need it when using -s/--stage to resume the process!' + Colors._CLEAR )
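
The snippet references options.input and options.output, so the parser presumably also defines -i/--input and -o/--output before the arguments shown above; a minimal setup consistent with that could look like the following sketch (the actual fy.py may differ):

import argparse

parser = argparse.ArgumentParser( description='fyborg pipeline' )
parser.add_argument( '-i', '--input', action='store', dest='input', help='The input directory.' )
parser.add_argument( '-o', '--output', action='store', dest='output', help='The output directory.' )
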
Code example #11
File: fyborg.py Project: FNNDSC/fyborg
def fyborg( trkFile, outputTrkFile, actions, *args ):

  if not actions:
    c.error( "We gotta do something.." )
    return

  showDebug = 'debug' in args

  singleThread = 'singlethread' in args

  c.debug( "trkFile:" + str( trkFile ), showDebug )
  c.debug( "outputTrkFile:" + str( outputTrkFile ), showDebug )
  c.debug( "args:" + str( args ), showDebug )



  # load trk file
  s = io.loadTrk( trkFile )
  tracks = s[0]
  origHeader = s[1]
  tracksHeader = numpy.copy( s[1] )
  numberOfScalars = origHeader['n_scalars']
  scalars = origHeader['scalar_name'].tolist()
  numberOfTracks = origHeader['n_count']

  # show some file information
  printTrkInfo( tracksHeader, trkFile )

  # grab the scalarNames
  scalarNames = []
  for a in actions:
    if a.scalarName() != FyAction.NoScalar:
      scalarNames.append( a.scalarName() )

  # increase the number of scalars
  tracksHeader['n_scalars'] += len( scalarNames )

  # .. attach the new scalar names
  for i in range( len( scalarNames ) ):
    tracksHeader['scalar_name'][numberOfScalars + i] = scalarNames[i]

  #
  # THREADED COMPONENT
  #
  if singleThread:
    numberOfThreads = 1
  else:
    numberOfThreads = multiprocessing.cpu_count()
  c.info( 'Splitting master into ' + str( numberOfThreads ) + ' pieces..' )
  splittedOutputTracks = u.split_list( tracks[:], numberOfThreads )

  # list of threads
  t = [None] * numberOfThreads

  # list of alive flags
  a = [None] * numberOfThreads

  # list of tempFiles
  f = [None] * numberOfThreads

  for n in xrange( numberOfThreads ):
    # give each thread its own shallow copy of the action list
    __actions = list( actions )

    # mark thread as alive
    a[n] = True
    # fire the thread and give it a filename based on the number
    tmpFile = tempfile.mkstemp( '.trk', 'fyborg' )[1]
    f[n] = tmpFile
    t[n] = Process( target=fyborgLooper_, args=( splittedOutputTracks[n][:], tracksHeader, tmpFile, __actions, showDebug, n + 1 ) )
    c.info( "Starting Thread-" + str( n + 1 ) + "..." )
    t[n].start()

  allDone = False

  while not allDone:

    time.sleep( 1 )

    for n in xrange( numberOfThreads ):

      a[n] = t[n].is_alive()

    if not any( a ):
      # if no thread is alive
      allDone = True

  #
  # END OF THREADED COMPONENT
  #
  c.info( "All Threads done!" )

  #
  # Merging stage
  #
  c.info( "Merging tracks.." )

  outputTracks = []
  # now read all the created tempFiles and merge'em to one
  for tmpFileNo in xrange( 0, len( f ) ):
    tTracks = io.loadTrk( f[tmpFileNo] )

    # add them
    outputTracks.extend( tTracks[0] )

  c.info( "Merging done!" )

  io.saveTrk( outputTrkFile, outputTracks, tracksHeader, None, True )

  c.info( "All done!" )