Example #1
# Imports assumed for this function (standard ImageJ / TrackMate scripting API):
import sys, os
from java.io import File
from fiji.plugin.trackmate import Logger
from fiji.plugin.trackmate.io import TmXmlReader

def save_track_info(xml_filename, input_folder, output_folder, output_filename, TEST_RUN=False):
	
	# Read in the tracking information from the TrackMate xml file
	xmlFile = File(input_folder + xml_filename)
	reader = TmXmlReader(xmlFile)
	if not reader.isReadingOk():
	    sys.exit(reader.getErrorMessage())
	
	model = reader.getModel() # Get a full model # model is a fiji.plugin.trackmate.Model
	model.setLogger(Logger.IJ_LOGGER) # Set logger for the model
	fm = model.getFeatureModel() # The feature model, that stores edge and track features.

	# Open a file handle to save the information of the tracks
	f_out = open(output_folder + output_filename, 'w')
	f_out.write('track_name' + ',' + 'track_id' + ',' + 'spot_id' + ',' + 'x'  + ',' + 'y' + ',' + 'z' + ',' + 't' + '\n')

	#--------------------------------------------------------------------
	# The following loop goes through all the tracks loaded from the XML
	#--------------------------------------------------------------------
	
	test_count = 0
	for track_id in model.getTrackModel().trackIDs(True):
	
	#	# This trick is useful if a specific track_id needs to be revisited later
	#	if not track_id == 152:
	#		continue
		
		if TEST_RUN == True and test_count > 1:
			break
		test_count = test_count + 1
	
	
		# Fetch the track feature from the feature model.
		nSplit = fm.getTrackFeature(track_id, 'NUMBER_SPLITS')
		model.getLogger().log('')
		model.getLogger().log('Track ' + str(track_id) + ': number of splitting events = ' + str(nSplit))
	
		if nSplit > 0:
			model.getLogger().log('Branched tracks are skipped for now. They will be dealt with in the future.')
		else:
			track = model.getTrackModel().trackSpots(track_id)
			track_name = model.getTrackModel().name(track_id)
	
			for spot in track:
				spotID = spot.ID()
				# Fetch spot features directly from spot.
				x = spot.getFeature('POSITION_X')
				y = spot.getFeature('POSITION_Y')
				z = spot.getFeature('POSITION_Z')
				f = int(spot.getFeature('FRAME')+1)

				f_out.write(str(track_name) + ',' + str(track_id) + ',' + str(spotID) + ',' + str(x)  + ',' + str(y) + ',' + str(z) + ',' + str(f) + '\n')
	f_out.close()
	return os.path.isfile(output_folder + output_filename)
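
# A hypothetical call, for illustration only (the file names and folders are
# placeholders, not taken from the original script):
# success = save_track_info('FakeTracks.xml', '/path/to/input/', '/path/to/output/',
#                           'FakeTracks_spots.csv', TEST_RUN=True)
# print('CSV written: ' + str(success))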
Example #2
settings = Settings()
settings.setFrom(imp)

# Take the rest of the settings from an XML file.
file = File(
    "D:\\uni\\TFG\\TrackingImageJInfo\\ScriptingWithPy\\TrackFilterAfter.xml")

# We have to feed a logger to the reader.
logger = Logger.IJ_LOGGER

#-------------------
# Instantiate reader
#-------------------

reader = TmXmlReader(file)
if not reader.isReadingOk():
    sys.exit(reader.getErrorMessage())

#---------------------------------------
# Building a settings object from a file
#---------------------------------------

# Reading the Settings object is currently somewhat involved. The
# reader needs to properly initialize everything you saved in the file,
# including the spot, edge and track analyzers, the filters, the detector,
# the tracker, etc.
# It can do that, but you must provide the reader with providers that
# are able to instantiate the correct TrackMate Java classes from
# the XML data.
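
# A minimal sketch of that step, as done in the later examples here; it assumes
# the provider classes are imported from fiji.plugin.trackmate.providers:
detectorProvider = DetectorProvider()
trackerProvider = TrackerProvider()
spotAnalyzerProvider = SpotAnalyzerProvider()
edgeAnalyzerProvider = EdgeAnalyzerProvider()
trackAnalyzerProvider = TrackAnalyzerProvider()

# With the providers in place, the reader can flesh out the settings object:
reader.readSettings(settings, detectorProvider, trackerProvider,
                    spotAnalyzerProvider, edgeAnalyzerProvider,
                    trackAnalyzerProvider)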
Example #3
def magic(file):
    # We have to feed a logger to the reader.
    logger = Logger.IJ_LOGGER

    #-------------------
    # Instantiate reader
    #-------------------

    reader = TmXmlReader(File(file))
    if not reader.isReadingOk():
        sys.exit(reader.getErrorMessage())
    #-----------------
    # Get a full model
    #-----------------

    # This will return a fully working model, with everything
    # stored in the file. Missing fields (e.g. tracks) will be
    # null or None in python
    model = reader.getModel()
    # model is a fiji.plugin.trackmate.Model

    #model = Model()
    #model.setSpots(model2.getSpots(), True)

    #----------------
    # Display results
    #----------------

    # We can now plainly display the model. It will be shown on an
    # empty image with default magnification.
    sm = SelectionModel(model)
    #displayer = HyperStackDisplayer(model, sm)
    #displayer.render()

    #---------------------------------------------
    # Get only part of the data stored in the file
    #---------------------------------------------

    # You might want to access only separate parts of the
    # model.

    spots = model.getSpots()
    # spots is a fiji.plugin.trackmate.SpotCollection

    logger.log(str(spots))

    # If you want to get the tracks, it is a bit trickier.
    # Internally, the tracks are stored as one big, simple mathematical
    # graph, which is what you retrieve from the file.
    # There are methods to rebuild the actual tracks, taking
    # everything into account, but frankly, if you want to do
    # that it is simpler to go through the model:
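
    # A minimal sketch of going through the model, mirroring the track loop
    # used in the other examples in this collection (assumes tracks were saved):
    for track_id in model.getTrackModel().trackIDs(True):
        track = model.getTrackModel().trackSpots(track_id)
        logger.log('Track ' + str(track_id) + ' has ' + str(track.size()) + ' spots.')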

    #---------------------------------------
    # Building a settings object from a file
    #---------------------------------------

    # Reading the Settings object is currently somewhat involved. The
    # reader needs to properly initialize everything you saved in the file,
    # including the spot, edge and track analyzers, the filters, the detector,
    # the tracker, etc.
    # It can do that, but you must provide the reader with providers that
    # are able to instantiate the correct TrackMate Java classes from
    # the XML data.

    # We start by creating an empty settings object
    settings = Settings()

    # Then we create all the providers, and point them to the target model:
    detectorProvider        = DetectorProvider()
    trackerProvider         = TrackerProvider()
    spotAnalyzerProvider    = SpotAnalyzerProvider()
    edgeAnalyzerProvider    = EdgeAnalyzerProvider()
    trackAnalyzerProvider   = TrackAnalyzerProvider()

    # Phew! Now we can flesh out our settings object:
    reader.readSettings(settings, detectorProvider, trackerProvider, spotAnalyzerProvider, edgeAnalyzerProvider, trackAnalyzerProvider)
    settings.detectorFactory = ManualDetectorFactory()


    # Configure tracker - we want to allow track splitting and gap closing,
    # but not track merging.
    settings.initialSpotFilterValue = 0
    settings.trackerFactory = SparseLAPTrackerFactory()
    settings.trackerSettings = LAPUtils.getDefaultLAPSettingsMap()  # almost good enough
    settings.trackerSettings['ALLOW_TRACK_SPLITTING'] = True
    settings.trackerSettings['ALLOW_TRACK_MERGING'] = False
    settings.trackerSettings['LINKING_MAX_DISTANCE'] = 40.0
    settings.trackerSettings['ALLOW_GAP_CLOSING'] = True
    settings.trackerSettings['GAP_CLOSING_MAX_DISTANCE'] = 30.0
    settings.trackerSettings['MAX_FRAME_GAP'] = 4

    # Configure track analyzers - the track filters below use the NUMBER_SPOTS
    # and NUMBER_SPLITS features, which are computed by the TrackBranchingAnalyzer.
    # By default, outside of the GUI, no features are calculated, so the
    # relevant analyzers must be added explicitly.

    settings.addTrackAnalyzer(TrackDurationAnalyzer())
    settings.addTrackAnalyzer(TrackBranchingAnalyzer())
    settings.addTrackAnalyzer(TrackIndexAnalyzer())
    settings.addTrackAnalyzer(TrackSpeedStatisticsAnalyzer())
    settings.addTrackAnalyzer(LinearTrackDescriptor())
    # Configure track filters - keep only tracks with more than 31 spots
    # (NUMBER_SPOTS above the threshold) and discard tracks with any splitting
    # events (NUMBER_SPLITS below 0.5).

    filter2 = FeatureFilter('NUMBER_SPOTS', 31, True)
    settings.addTrackFilter(filter2)
    #filter3 = FeatureFilter('NUMBER_GAPS', 2, False)
    #settings.addTrackFilter(filter3)
    filter4 = FeatureFilter('NUMBER_SPLITS', 0.5, False)
    settings.addTrackFilter(filter4)


    settings.addEdgeAnalyzer(EdgeTargetAnalyzer())
    settings.addEdgeAnalyzer(EdgeTimeLocationAnalyzer())
    settings.addEdgeAnalyzer(EdgeVelocityAnalyzer())
    settings.addEdgeAnalyzer(LinearTrackEdgeStatistics())

    #-------------------
    # Instantiate plugin
    #-------------------
    logger.log(str('\n\nSETTINGS:'))
    logger.log(unicode(settings))
    print("tracking")
    spots = model.getSpots()
    # spots is a fiji.plugin.trackmate.SpotCollection

    logger.log(str(spots))
    logger.log(str(spots.keySet()))


    # The settings object is also instantiated with the target image.
    # Note that the XML file only stores a link to the image.
    # If the link is not valid, the image will not be found.
    #imp = settings.imp
    #imp.show()

    # We can now hand the model and settings to TrackMate and re-run the
    # tracking and filtering steps:

    trackmate = TrackMate(model, settings)

    #--------
    # Process
    #--------

    ok = trackmate.checkInput()
    if not ok:
        sys.exit(str(trackmate.getErrorMessage()))

    trackmate.execInitialSpotFiltering()
    trackmate.execSpotFiltering(True)
    trackmate.execTracking()
    trackmate.computeTrackFeatures(True)
    trackmate.execTrackFiltering(True)
    trackmate.computeEdgeFeatures(True)

    outfile = TmXmlWriter(File(str(file[:-4] + ".trackmate.xml")))
    outfile.appendSettings(settings)
    outfile.appendModel(model)
    outfile.writeToFile()

    ISBIChallengeExporter.exportToFile(model, settings, File(str(file[:-4] + ".ISBI.xml")))
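
# A hypothetical call, for illustration only (the path is the one used in
# Example #2; any TrackMate XML file would do). It writes <input>.trackmate.xml
# and <input>.ISBI.xml next to the input file:
# magic("D:\\uni\\TFG\\TrackingImageJInfo\\ScriptingWithPy\\TrackFilterAfter.xml")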
Example #4
def TrackMate_main(infile, outfile):
    file = File(infile)

    # We have to feed a logger to the reader.
    logger = Logger.IJ_LOGGER

    #-------------------
    # Instantiate reader
    #-------------------

    reader = TmXmlReader(file)
    if not reader.isReadingOk():
        sys.exit(reader.getErrorMessage())
    #-----------------
    # Get a full model
    #-----------------

    # This will return a fully working model, with everything
    # stored in the file. Missing fields (e.g. tracks) will be
    # null or None in python
    model = reader.getModel()
    model.setLogger(Logger.IJ_LOGGER)
    # model is a fiji.plugin.trackmate.Model

    #---------------------------------------
    # Building a settings object from a file
    #---------------------------------------

    # We start by creating an empty settings object
    settings = Settings()

    # Then we create all the providers, and point them to the target model:
    detectorProvider = DetectorProvider()
    trackerProvider = TrackerProvider()
    spotAnalyzerProvider = SpotAnalyzerProvider()
    edgeAnalyzerProvider = EdgeAnalyzerProvider()
    trackAnalyzerProvider = TrackAnalyzerProvider()

    reader.readSettings(settings, detectorProvider, trackerProvider,
                        spotAnalyzerProvider, edgeAnalyzerProvider,
                        trackAnalyzerProvider)

    #----------------
    # Save results
    #----------------

    # The feature model, that stores edge and track features.
    fm = model.getFeatureModel()

    f = open(outfile, 'wb')
    writer = csv.writer(f)  # create the CSV writer once for the whole output file

    for id in model.getTrackModel().trackIDs(True):
        track = model.getTrackModel().trackSpots(id)
        for spot in track:
            sid = spot.ID()
            # Fetch spot features directly from spot.
            x = spot.getFeature('POSITION_X')
            y = spot.getFeature('POSITION_Y')
            t = spot.getFeature('FRAME')
            q = spot.getFeature('QUALITY')
            snr = spot.getFeature('SNR')
            mean = spot.getFeature('MEAN_INTENSITY')

            semiaxislength_c = spot.getFeature('ELLIPSOIDFIT_SEMIAXISLENGTH_C')
            if semiaxislength_c is None:
                semiaxislength_c = 0

            semiaxislength_b = spot.getFeature('ELLIPSOIDFIT_SEMIAXISLENGTH_B')
            if semiaxislength_b is None:
                semiaxislength_b = 0

            phi_b = spot.getFeature('ELLIPSOIDFIT_AXISPHI_B')
            if phi_b is None:
                phi_b = 0

            # Assemble one CSV row per spot (values kept as strings, in the
            # original column order), and write it out.
            row = [
                str(sid), str(id), str(x), str(y), str(t),
                str(semiaxislength_c), str(semiaxislength_b),
                str(phi_b), str(mean)
            ]
            writer.writerow(row)

    f.close()
    print('Saved ' + str(model.getTrackModel().nTracks(True)) + ' tracks.')
Example #5
  
#----------------
# Setup variables
#----------------
  
# Put here the path to the TrackMate file you want to load
file = File('/Users/tinevez/Desktop/iconas/Data/FakeTracks.xml')
  
# We have to feed a logger to the reader.
logger = Logger.IJ_LOGGER
  
#-------------------
# Instantiate reader
#-------------------
  
reader = TmXmlReader(file)
if not reader.isReadingOk():
    sys.exit(reader.getErrorMessage())
#-----------------
# Get a full model
#-----------------
  
# This will return a fully working model, with everything
# stored in the file. Missing fields (e.g. tracks) will be 
# null or None in python
model = reader.getModel()
# model is a fiji.plugin.trackmate.Model
  
#----------------------------------------
# Choose the TrackMate XML file to load
#----------------------------------------
chooser = JFileChooser()
InputFolderPath = ''
if (chooser.showOpenDialog(None) == JFileChooser.APPROVE_OPTION):
    IJ.log("getCurrentDirectory(): " + chooser.getCurrentDirectory().toString())
    InputFolderPath = chooser.getSelectedFile().toString()
else:
    IJ.log("No selection")
file = File(InputFolderPath)

# We have to feed a logger to the reader.
logger = Logger.IJ_LOGGER

#-------------------
# Instantiate reader
#-------------------

reader = TmXmlReader(file)
if not reader.isReadingOk():
    sys.exit(reader.getErrorMessage())
model = reader.getModel()

spots = model.getSpots()
spotIterator = spots.iterator(False)
chooser2 = JFileChooser()
fakefile = File(defaultpath)
chooser2.setCurrentDirectory(fakefile)
chooser2.setDialogTitle("Select Folder")
chooser2.setFileSelectionMode(JFileChooser.DIRECTORIES_ONLY)
chooser2.setAcceptAllFileFilterUsed(False)
frontoutputpath2 = ''
if (chooser2.showOpenDialog(None) == JFileChooser.APPROVE_OPTION):
    IJ.log("getCurrentDirectory(): " + chooser2.getCurrentDirectory().toString())
    frontoutputpath2 = chooser2.getSelectedFile().toString()
else:
    IJ.log("No selection")

def save_snap_shot_seq_simple(imp, xml_filename, input_folder, output_folder,
							  ZOOM_BY_Z_DEPTH=False, Z1_CLOSE_TO_COVERSLIP=False,
							  TEST_RUN=False, ANNOTATE_Z=False,
							  DRAW_DOT=True, DRAW_BOX=True, L_RECT=100,
							  trackIDs=None, z_number_to_project=1):
	
	# Read in the tracking information from the TrackMate xml file
	xmlFile = File(input_folder + xml_filename)
	reader = TmXmlReader(xmlFile)
	if not reader.isReadingOk():
	    sys.exit(reader.getErrorMessage())
	
	model = reader.getModel() # Get a full model # model is a fiji.plugin.trackmate.Model
	model.setLogger(Logger.IJ_LOGGER) # Set logger for the model
	fm = model.getFeatureModel() # The feature model, that stores edge and track features.


	# Get the pixel calibration data
	calibr = imp.getCalibration()
	w = calibr.pixelWidth
	d = calibr.pixelDepth
	print("pixel width and depth: ", w, d)
	
	# Get the dimensions of the images
	cN = imp.getNChannels()
	zN = imp.getNSlices()
	tN = imp.getNFrames()
	print("Channel number, z slice number, frame number:", cN, zN, tN)

	# Calculate the half z depth for looping from the desired z_number_to_project variable
	z_half = int( ( z_number_to_project - 1 ) / 2 )
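	# For example, z_number_to_project = 5 gives z_half = 2, so slices z-2 .. z+2
	# are exported around each spot; z_number_to_project = 1 gives z_half = 0.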

	#--------------------------------------------------------------------
	# The following loop goes through all the tracks loaded from the XML
	#--------------------------------------------------------------------
	
	test_count = 0
	if trackIDs is None:
		trackIDs = model.getTrackModel().trackIDs(True)
	for track_id in trackIDs:
	
	#	# This trick is useful if a specific track_id needs to be revisited later
	#	if not track_id == 152:
	#		continue
		
		if TEST_RUN == True and test_count > 1:
			break
		test_count = test_count + 1
		
		# Fetch the track feature from the feature model.
		nSplit = fm.getTrackFeature(track_id, 'NUMBER_SPLITS')
		model.getLogger().log('')
		model.getLogger().log('Track ' + str(track_id) + ': number of splitting events = ' + str(nSplit))
	
		if nSplit > 0:
			model.getLogger().log('Branched tracks are skipped for now. They will be dealt with in the future.')
		else:
			track = model.getTrackModel().trackSpots(track_id)
			track_name = model.getTrackModel().name(track_id)
			
			# Create a tif folder to store spot snapshots
			tif_folder = output_folder + 'trackName-' + str(track_name) + '-trackID-' + '%04d'%(track_id) + '/'
			if not os.path.exists(tif_folder):
				os.makedirs(tif_folder)
	
			for spot in track:
				spotID = spot.ID()
				# Fetch spot features directly from spot.
				x0 = spot.getFeature('POSITION_X')
				x = int(x0/w) # convert to pixel
				y0 = spot.getFeature('POSITION_Y')
				y = int(y0/w) # convert to pixel
				z0 = spot.getFeature('POSITION_Z')
				z = int(z0/d+1) # convert to a 1-based z slice index
				f = int(spot.getFeature('FRAME')+1)
	
				for c in range(cN):
					current_c = c + 1
					
					for current_z in range( z - z_half , z + z_half + 1 ):
						
						# Convert specified position (c,z,f) to index assuming hyperstack order is czt (default)
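						# (ImageJ's built-in imp.getStackIndex(current_c, current_z, f)
						# should return the same 1-based index for the default CZT order.)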
						sliceIndex = int(cN * zN * (f-1) + cN * (current_z-1) + current_c)
						imp.setSlice(sliceIndex)
#						ipTemp = imp.getProcessor()
#						impTemp = ImagePlus('temp', ipTemp)
						impTemp = imp.crop('slice')

						
						if DRAW_BOX == True:
							# Draw a box around the tracked dot
							ip = impTemp.getProcessor()
#							ip.setColor(Color.BLUE)
#							ip.setColor(Color.CYAN)
							ip.setColor(Color.WHITE)
							ip.setLineWidth(2)
							ip.drawRect(int(x-L_RECT/2), int(y-L_RECT/2), L_RECT, L_RECT)
							impTemp.setProcessor(ip)
						
						if DRAW_DOT == True:
							# Draw a dot at the center of image
							ip = impTemp.getProcessor()
#							ip.setColor(Color.BLUE)
#							ip.setColor(Color.CYAN)
							ip.setColor(Color.WHITE)
							ip.setLineWidth(4)
							ip.drawDot(x, y)
							impTemp.setProcessor(ip)
	
						if ZOOM_BY_Z_DEPTH == True:
							minZoom = 0.5 # this parameter specifies the minimal zoom when shrinking images
							# First, expand the snapshot by 2 times to make them less pixelated, also prepare for the zoom-by-z
							impTemp = slices.resizeImage(impTemp, 1/minZoom)
							# Second, zoom out the image depending on distance from coverslip, but keep the canvas size
							impTemp = slices.zoomImageByZ( impTemp, z, zN, minZoom, Z1_CLOSE_TO_COVERSLIP )

						if ANNOTATE_Z == True:
							# annotate the z position at the bottom center

							ip = impTemp.getProcessor()
#							ip.setJustification(1) # 0, 1, 2 represents left, center and right justification.
							ip.setJustification(0) # 0, 1, 2 represents left, center and right justification.
							arial_font = Font("Arial", 0, 18) # 0 = PLAIN, 18 is font size
							ip.setFont(arial_font)
							ip.setAntialiasedText(True) # anti-aliased text appears to work only on RGB images
							ip.setColor(Color.WHITE)
#							annotation_x_pos = int(ip.getWidth()/2)
#							annotation_x_pos = int(ip.getWidth() - 28)
#							annotation_y_pos = int(ip.getHeight() - 2)
							annotation_x_pos = 2
							annotation_y_pos = 23
#							print(annotation_x_pos, annotation_y_pos)
							if Z1_CLOSE_TO_COVERSLIP == True:
								z_relative = 2 * current_z
							else:
								z_relative = 2 * (zN - current_z + 1)
							ip.drawString( "z = "+str(z_relative)+" "+u"\u00B5"+"m", annotation_x_pos, annotation_y_pos )
							
							impTemp.setProcessor(ip)
	
						# Make a meaningful file name and save the file
						outFileName = tif_folder + 't' + '%04d'%(f) + '_z' + '%03d'%(current_z) + '_c' + str(current_c) + "_" + str(spotID) + ".tif"
						FileSaver(impTemp).saveAsTiff(outFileName)