# Configure tracker
settings.trackerFactory = SparseLAPTrackerFactory()
settings.trackerSettings = LAPUtils.getDefaultLAPSettingsMap()
settings.trackerSettings['LINKING_MAX_DISTANCE'] = 10.0
settings.trackerSettings['GAP_CLOSING_MAX_DISTANCE'] = 10.0
settings.trackerSettings['MAX_FRAME_GAP'] = 3

# Add the analyzers for some spot features.
# You need to configure TrackMate with analyzers that will generate
# the data you need.
# Here we just add two analyzers for spots: one that computes generic
# pixel intensity statistics (mean, max, etc.) and one that computes
# an estimate of each spot's SNR.
# The trick here is that the second one requires the first one to be in
# place. Be aware of these kinds of gotchas, and read the docs.
settings.addSpotAnalyzerFactory(SpotIntensityAnalyzerFactory())
settings.addSpotAnalyzerFactory(SpotContrastAndSNRAnalyzerFactory())

filter2 = FeatureFilter('QUALITY', 10, True)
settings.addSpotFilter(filter2)
filter3 = FeatureFilter('MEDIAN_INTENSITY', 10, True)
settings.addSpotFilter(filter3)
filter4 = FeatureFilter('SNR', 0.5, True)
settings.addSpotFilter(filter4)

# Add analyzers for some track features, such as the track mean speed.
settings.addTrackAnalyzer(TrackSpeedStatisticsAnalyzer())
settings.addTrackAnalyzer(TrackDurationAnalyzer())

filter5 = FeatureFilter('TRACK_DISPLACEMENT', 5, True)
settings.addTrackFilter(filter5)

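# --- Context sketch (assumed, not part of the fragment above) ---
# The fragment above assumes a TrackMate Settings object and the classes below
# are already in scope. The package paths follow the pre-TrackMate-7 scripting
# API used by the import block of runTrackMate() further down in this file
# (TrackDurationAnalyzer's path is assumed by analogy with
# TrackSpeedStatisticsAnalyzer).
from fiji.plugin.trackmate import Settings
from fiji.plugin.trackmate.tracking.sparselap import SparseLAPTrackerFactory
from fiji.plugin.trackmate.tracking import LAPUtils
from fiji.plugin.trackmate.features import FeatureFilter
from fiji.plugin.trackmate.features.spot import SpotIntensityAnalyzerFactory
from fiji.plugin.trackmate.features.spot import SpotContrastAndSNRAnalyzerFactory
from fiji.plugin.trackmate.features.track import TrackSpeedStatisticsAnalyzer
from fiji.plugin.trackmate.features.track import TrackDurationAnalyzer
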
def runTrackMate(imp):
    import fiji.plugin.trackmate.Settings as Settings
    import fiji.plugin.trackmate.Model as Model
    import fiji.plugin.trackmate.SelectionModel as SelectionModel
    import fiji.plugin.trackmate.TrackMate as TrackMate
    import fiji.plugin.trackmate.Logger as Logger
    import fiji.plugin.trackmate.detection.DetectorKeys as DetectorKeys
    import fiji.plugin.trackmate.detection.DogDetectorFactory as DogDetectorFactory
    import fiji.plugin.trackmate.tracking.sparselap.SparseLAPTrackerFactory as SparseLAPTrackerFactory
    import fiji.plugin.trackmate.tracking.LAPUtils as LAPUtils
    import fiji.plugin.trackmate.visualization.hyperstack.HyperStackDisplayer as HyperStackDisplayer
    import fiji.plugin.trackmate.features.FeatureFilter as FeatureFilter
    import fiji.plugin.trackmate.features.FeatureAnalyzer as FeatureAnalyzer
    import fiji.plugin.trackmate.features.spot.SpotContrastAndSNRAnalyzerFactory as SpotContrastAndSNRAnalyzerFactory
    import fiji.plugin.trackmate.action.ExportStatsToIJAction as ExportStatsToIJAction
    import fiji.plugin.trackmate.io.TmXmlReader as TmXmlReader
    import fiji.plugin.trackmate.action.ExportTracksToXML as ExportTracksToXML
    import fiji.plugin.trackmate.io.TmXmlWriter as TmXmlWriter
    import fiji.plugin.trackmate.features.ModelFeatureUpdater as ModelFeatureUpdater
    import fiji.plugin.trackmate.features.SpotFeatureCalculator as SpotFeatureCalculator
    import fiji.plugin.trackmate.features.spot.SpotContrastAndSNRAnalyzer as SpotContrastAndSNRAnalyzer
    import fiji.plugin.trackmate.features.spot.SpotIntensityAnalyzerFactory as SpotIntensityAnalyzerFactory
    import fiji.plugin.trackmate.features.track.TrackSpeedStatisticsAnalyzer as TrackSpeedStatisticsAnalyzer
    import fiji.plugin.trackmate.util.TMUtils as TMUtils
    import fiji.plugin.trackmate.visualization.trackscheme.TrackScheme as TrackScheme
    import fiji.plugin.trackmate.visualization.PerTrackFeatureColorGenerator as PerTrackFeatureColorGenerator
    # Needed further down in this function (error handling, user dialog, overlay removal);
    # they may already be imported at module level.
    import sys
    from ij import IJ
    from ij.gui import WaitForUserDialog

    #-------------------------
    # Instantiate model object
    #-------------------------

    nFrames = imp.getNFrames()
    model = Model()

    # Set logger
    #model.setLogger(Logger.IJ_LOGGER)

    #------------------------
    # Prepare settings object
    #------------------------

    settings = Settings()
    settings.setFrom(imp)

    # Configure detector
    settings.detectorFactory = DogDetectorFactory()
    settings.detectorSettings = {
        DetectorKeys.KEY_DO_SUBPIXEL_LOCALIZATION: True,
        DetectorKeys.KEY_RADIUS: 12.30,
        DetectorKeys.KEY_TARGET_CHANNEL: 1,
        DetectorKeys.KEY_THRESHOLD: 100.,
        DetectorKeys.KEY_DO_MEDIAN_FILTERING: False,
    }

    # Configure tracker
    settings.trackerFactory = SparseLAPTrackerFactory()
    settings.trackerSettings = LAPUtils.getDefaultLAPSettingsMap()
    settings.trackerSettings['LINKING_MAX_DISTANCE'] = 10.0
    settings.trackerSettings['GAP_CLOSING_MAX_DISTANCE'] = 10.0
    settings.trackerSettings['MAX_FRAME_GAP'] = 3

    # Add the analyzers for some spot features.
    # You need to configure TrackMate with analyzers that will generate
    # the data you need.
    # Here we just add two analyzers for spots: one that computes generic
    # pixel intensity statistics (mean, max, etc.) and one that computes
    # an estimate of each spot's SNR.
    # The trick here is that the second one requires the first one to be in
    # place. Be aware of these kinds of gotchas, and read the docs.
    settings.addSpotAnalyzerFactory(SpotIntensityAnalyzerFactory())
    settings.addSpotAnalyzerFactory(SpotContrastAndSNRAnalyzerFactory())

    # Add an analyzer for some track features, such as the track mean speed.
    settings.addTrackAnalyzer(TrackSpeedStatisticsAnalyzer())

    settings.initialSpotFilterValue = 1

    print(str(settings))

    #----------------------
    # Instantiate trackmate
    #----------------------

    trackmate = TrackMate(model, settings)

    #------------
    # Execute all
    #------------

    ok = trackmate.checkInput()
    if not ok:
        sys.exit(str(trackmate.getErrorMessage()))

    ok = trackmate.process()
    if not ok:
        sys.exit(str(trackmate.getErrorMessage()))

    #----------------
    # Display results
    #----------------

    selectionModel = SelectionModel(model)
    displayer = HyperStackDisplayer(model, selectionModel, imp)
    displayer.render()
    displayer.refresh()

    #---------------------
    # Select correct spots
    #---------------------

    # Prepare display.
    sm = SelectionModel(model)
    color = PerTrackFeatureColorGenerator(model, 'TRACK_INDEX')

    # Launch TrackScheme to select spots and tracks.
    trackscheme = TrackScheme(model, sm)
    trackscheme.setDisplaySettings('TrackColoring', color)
    trackscheme.render()

    # Update image with TrackScheme commands.
    view = HyperStackDisplayer(model, sm, imp)
    view.setDisplaySettings('TrackColoring', color)
    view.render()

    # Wait for the user to select correct spots and tracks before collecting data.
    dialog = WaitForUserDialog(
        "Spots",
        "Delete incorrect spots and edit tracks if necessary. (Press ESC to cancel analysis)")
    dialog.show()
    if dialog.escPressed():
        IJ.run("Remove Overlay", "")
        imp.close()
        return ([], nFrames)

    # The feature model, that stores edge and track features.
    #model.getLogger().log('Found ' + str(model.getTrackModel().nTracks(True)) + ' tracks.')
    fm = model.getFeatureModel()
    crds_perSpot = []

    for id in model.getTrackModel().trackIDs(True):
        # Fetch the track feature from the feature model (remove the triple quotes to enable).
        """v = fm.getTrackFeature(id, 'TRACK_MEAN_SPEED')
        model.getLogger().log('')
        model.getLogger().log('Track ' + str(id) + ': mean velocity = ' + str(v) + ' ' + model.getSpaceUnits() + '/' + model.getTimeUnits())"""
        trackID = str(id)
        track = model.getTrackModel().trackSpots(id)
        spot_track = {}
        for spot in track:
            sid = spot.ID()
            # Fetch spot features directly from spot.
            x = spot.getFeature('POSITION_X')
            y = spot.getFeature('POSITION_Y')
            t = spot.getFeature('FRAME')
            q = spot.getFeature('QUALITY')
            snr = spot.getFeature('SNR')
            mean = spot.getFeature('MEAN_INTENSITY')
            #model.getLogger().log('\tspot ID = ' + str(sid) + ', x=' + str(x) + ', y=' + str(y) + ', t=' + str(t) + ', q=' + str(q) + ', snr=' + str(snr) + ', mean = ' + str(mean))
            spot_track[t] = (x, y)
        crds_perSpot.append(spot_track)
        #print("Spot", crds_perSpot.index(spot_track), "has the following coordinates:", crds_perSpot[crds_perSpot.index(spot_track)])

    return (crds_perSpot, nFrames)

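# --- Usage sketch (illustrative, not from the original script) ---
# runTrackMate() returns a list of per-track dictionaries mapping frame to
# (x, y), plus the frame count, so a hypothetical driver snippet could do:
from ij import IJ

imp = IJ.getImage()  # or IJ.openImage(...) on a time-lapse stack of your choice
crds_perSpot, nFrames = runTrackMate(imp)
print("Collected " + str(len(crds_perSpot)) + " tracks over " + str(nFrames) + " frames.")
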
def track_cells(folder_w, filename, imp, correction):
    #imp = IJ.openImage(os.path.join(folder, filename))
    #imp.show()

    # Get image dimensions, set ROI to remove part of the fluorescent ring.
    x_size = ImagePlus.getDimensions(imp)[0]
    y_size = ImagePlus.getDimensions(imp)[1]
    x_start = 0
    y_start = 0
    # Calculate alternative ROI.
    if crop_ring:
        x_start = 170 / 2
        y_start = 170 / 2
        x_size = x_size - 170
        y_size = y_size - 170
    print(str(x_start) + ", " + str(y_start) + ", " + str(x_size) + ", " + str(y_size))
    imp.setRoi(OvalRoi(x_start, y_start, x_size, y_size))
    #imp_dup = imp.duplicate()
    #imp_dup.show()
    #red_corrected_img.show()
    IJ.run(imp, "Make Inverse", "")
    IJ.setForegroundColor(0, 0, 0)
    IJ.run(imp, "Fill", "stack")
    imp.killRoi()
    #imp.show()
    #sys.exit()
    #img_filename = filename + "_corrected_red_stack.tif"
    #folder_filename = os.path.join(well_folder, img_filename)
    #IJ.save(imp, folder_filename)

    #----------------------------
    # Create the model object now
    #----------------------------

    # Some of the parameters we configure below need to have
    # a reference to the model at creation. So we create an
    # empty model now.
    model = Model()

    # Send all messages to ImageJ log window.
    model.setLogger(Logger.IJ_LOGGER)

    #------------------------
    # Prepare settings object
    #------------------------

    settings = Settings()
    settings.setFrom(imp)

    # Configure detector - we use the Strings for the keys.
    settings.detectorFactory = LogDetectorFactory()
    settings.detectorSettings = {
        'DO_SUBPIXEL_LOCALIZATION': SUBPIXEL_LOCALIZATION,
        'RADIUS': RADIUS,
        'TARGET_CHANNEL': TARGET_CHANNEL,
        'THRESHOLD': THRESHOLD,
        'DO_MEDIAN_FILTERING': MEDIAN_FILTERING,
    }

    # Configure spot filters - classical filter on quality.
    settings.initialSpotFilterValue = SPOT_FILTER
    settings.addSpotAnalyzerFactory(SpotIntensityAnalyzerFactory())
    settings.addSpotAnalyzerFactory(SpotContrastAndSNRAnalyzerFactory())
    settings.addSpotAnalyzerFactory(SpotMorphologyAnalyzerFactory())
    settings.addSpotAnalyzerFactory(SpotRadiusEstimatorFactory())

    filter1 = FeatureFilter('QUALITY', QUALITY, True)
    filter2 = FeatureFilter('CONTRAST', CONTRAST, True)
    filter2a = FeatureFilter('ESTIMATED_DIAMETER', MAX_ESTIMATED_DIAMETER, False)
    filter2b = FeatureFilter('MEDIAN_INTENSITY', MAX_MEDIAN_INTENSITY, False)
    settings.addSpotFilter(filter1)
    settings.addSpotFilter(filter2)
    settings.addSpotFilter(filter2a)
    settings.addSpotFilter(filter2b)
    print(settings.spotFilters)

    # Configure tracker - we want to allow merges and fusions.
    settings.trackerFactory = SparseLAPTrackerFactory()
    settings.trackerSettings = LAPUtils.getDefaultLAPSettingsMap()  # almost good enough
    ## adapted from https://forum.image.sc/t/trackmate-scripting-automatically-exporting-spots-in-tracks-links-in-tracks-tracks-statistics-and-branching-analysis-to-csv/6256

    # Linking settings
    settings.trackerSettings['LINKING_MAX_DISTANCE'] = LINKING_MAX_DISTANCE
    if LINKING_FEATURE_PENALTIES == True:
        settings.trackerSettings['LINKING_FEATURE_PENALTIES'] = {
            LINKING_FEATURE_PENALTIES_TYPE: LINKING_FEATURE_PENALTIES_VALUE
        }
    else:
        settings.trackerSettings['LINKING_FEATURE_PENALTIES'] = {}

    # Gap closing settings
    settings.trackerSettings['ALLOW_GAP_CLOSING'] = ALLOW_GAP_CLOSING
    if ALLOW_GAP_CLOSING == True:
        settings.trackerSettings['GAP_CLOSING_MAX_DISTANCE'] = GAP_CLOSING_MAX_DISTANCE
        settings.trackerSettings['MAX_FRAME_GAP'] = MAX_FRAME_GAP
        if GAP_CLOSING_FEATURE_PENALTIES == True:
            settings.trackerSettings['GAP_CLOSING_FEATURE_PENALTIES'] = {
                GAP_CLOSING_FEATURE_PENALTIES_TYPE: GAP_CLOSING_FEATURE_PENALTIES_VALUE
            }
        else:
            settings.trackerSettings['GAP_CLOSING_FEATURE_PENALTIES'] = {}

    # Splitting settings
    settings.trackerSettings['ALLOW_TRACK_SPLITTING'] = ALLOW_TRACK_SPLITTING
    if ALLOW_TRACK_SPLITTING == True:
        settings.trackerSettings['SPLITTING_MAX_DISTANCE'] = SPLITTING_MAX_DISTANCE
        if SPLITTING_FEATURE_PENALTIES == True:
            settings.trackerSettings['SPLITTING_FEATURE_PENALTIES'] = {
                SPLITTING_FEATURE_PENALTIES_TYPE: SPLITTING_FEATURE_PENALTIES_VALUE
            }
        else:
            settings.trackerSettings['SPLITTING_FEATURE_PENALTIES'] = {}

    # Merging settings
    settings.trackerSettings['ALLOW_TRACK_MERGING'] = ALLOW_TRACK_MERGING
    if ALLOW_TRACK_MERGING == True:
        settings.trackerSettings['MERGING_MAX_DISTANCE'] = MERGING_MAX_DISTANCE
        if MERGING_FEATURE_PENALTIES == True:
            settings.trackerSettings['MERGING_FEATURE_PENALTIES'] = {
                MERGING_FEATURE_PENALTIES_TYPE: MERGING_FEATURE_PENALTIES_VALUE
            }
        else:
            settings.trackerSettings['MERGING_FEATURE_PENALTIES'] = {}

    print(settings.trackerSettings)

    # Configure track analyzers - later on we want to filter out tracks
    # based on their displacement, so we need to state that we want
    # track displacement to be calculated. By default, out of the GUI,
    # no features are calculated.
    # The displacement feature is provided by the TrackDurationAnalyzer.
    settings.addTrackAnalyzer(TrackDurationAnalyzer())
    settings.addTrackAnalyzer(TrackSpotQualityFeatureAnalyzer())

    # Configure track filters - we want to get rid of the two immobile spots at
    # the bottom right of the image. Track displacement must be above 10 pixels.
    filter3 = FeatureFilter('TRACK_DISPLACEMENT', TRACK_DISPLACEMENT, True)
    filter4 = FeatureFilter('TRACK_START', TRACK_START, False)
    #filter5 = FeatureFilter('TRACK_STOP', float(imp.getStack().getSize()) - 1.1, True)
    settings.addTrackFilter(filter3)
    settings.addTrackFilter(filter4)
    #settings.addTrackFilter(filter5)

    #-------------------
    # Instantiate plugin
    #-------------------

    trackmate = TrackMate(model, settings)

    #--------
    # Process
    #--------

    ok = trackmate.checkInput()
    if not ok:
        sys.exit(str(trackmate.getErrorMessage()))

    ok = trackmate.process()
    #if not ok:
    #    sys.exit(str(trackmate.getErrorMessage()))

    #----------------
    # Display results
    #----------------

    # Set output folder and filename, and create the output folder.
    well_folder = os.path.join(folder_w, filename)
    output_folder = os.path.join(well_folder, "Tracking")
    create_folder(output_folder)
    xml_file_name = filename + "_" + correction + "_trackmate_analysis.xml"
    folder_filename_xml = os.path.join(output_folder, xml_file_name)

    #ExportTracksToXML.export(model, settings, File(folder_filename_xml))
    outfile = TmXmlWriter(File(folder_filename_xml))
    outfile.appendSettings(settings)
    outfile.appendModel(model)
    outfile.writeToFile()

    # Echo results with the logger we set at start:
    #model.getLogger().log(str(model))

    # Create an array of timepoint length, filled with 0.
    cell_counts = zerolistmaker(imp.getStack().getSize())
    if ok:
        for id in model.getTrackModel().trackIDs(True):
            # Fetch the track from the track model.
            track = model.getTrackModel().trackSpots(id)
            for spot in track:
                # Fetch spot features directly from spot.
                t = spot.getFeature('FRAME')
                print(t)
                cell_counts[int(t)] = cell_counts[int(t)] + 1
    else:
        print("No spots detected!")

    if HEADLESS == False:
        selectionModel = SelectionModel(model)
        displayer = HyperStackDisplayer(model, selectionModel, imp)
        displayer.render()
        displayer.refresh()

    del imp
    return (cell_counts + [len(model.getTrackModel().trackIDs(True))])

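# --- Helper sketches (assumed, not shown in the original section) ---
# track_cells() above calls zerolistmaker() and create_folder(), which are not
# defined here. Minimal implementations consistent with how they are used
# (one zero per frame, and mkdir-if-missing) could look like this:
import os

def zerolistmaker(n):
    # Return a list of n zeros, one per timepoint/frame.
    return [0] * n

def create_folder(path):
    # Create the output folder if it does not exist yet.
    if not os.path.exists(path):
        os.makedirs(path)
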
def processImages(cfg, wellName, wellPath, images):
    firstImage = IJ.openImage(images[0][0][0][0])
    imgWidth = firstImage.getWidth()
    imgHeight = firstImage.getHeight()

    for c in range(0, cfg.getValue(ELMConfig.numChannels)):
        chanName = cfg.getValue(ELMConfig.chanLabel)[c]
        if cfg.getValue(ELMConfig.chanLabel)[c] in cfg.getValue(ELMConfig.chansToSkip):
            continue
        imColorSeq = ImageStack(imgWidth, imgHeight)
        imSeq = ImageStack(imgWidth, imgHeight)
        totalHist = []
        for z in range(0, cfg.getValue(ELMConfig.numZ)):
            for t in range(0, cfg.getValue(ELMConfig.numT)):
                currIP = IJ.openImage(images[c][z][t][0])
                imColorSeq.addSlice(currIP.duplicate().getProcessor())

                currIP = ELMImageUtils.getGrayScaleImage(currIP, c, chanName, cfg)
                imSeq.addSlice(currIP.getProcessor())
                imgStats = currIP.getStatistics()
                currHist = imgStats.getHistogram()
                if not totalHist:
                    for i in range(len(currHist)):
                        totalHist.append(currHist[i])
                else:
                    for i in range(len(currHist)):
                        totalHist[i] += currHist[i]

        if cfg.hasValue(ELMConfig.thresholdFromWholeRange) and cfg.getValue(ELMConfig.thresholdFromWholeRange) == True:
            threshMethod = "Otsu"  # Default works very poorly for this data
            if cfg.hasValue(ELMConfig.thresholdMethod):
                threshMethod = cfg.getValue(ELMConfig.thresholdMethod)
            thresholder = AutoThresholder()
            computedThresh = thresholder.getThreshold(threshMethod, totalHist)
            cfg.setValue(ELMConfig.imageThreshold, computedThresh)
            print("\tComputed threshold from total hist (" + threshMethod + "): " + str(computedThresh))
            print()
        else:
            print("\tUsing threshold computed on individual images!")
            print()
            computedThresh = 0

        chanName = cfg.getValue(ELMConfig.chanLabel)[c]

        imp = ImagePlus()
        imp.setStack(imSeq)
        imp.setDimensions(1, 1, cfg.getValue(ELMConfig.numT))
        imp.setTitle(wellName + ", channel " + str(c))

        impColor = ImagePlus()
        impColor.setStack(imColorSeq)
        impColor.setDimensions(1, 1, cfg.getValue(ELMConfig.numT))
        impColor.setTitle(wellName + ", channel " + str(c) + " (Color)")

        #----------------------------
        # Create the model object now
        #----------------------------

        # Some of the parameters we configure below need to have
        # a reference to the model at creation. So we create an
        # empty model now.
        model = Model()

        # Send all messages to ImageJ log window.
        model.setLogger(Logger.IJ_LOGGER)

        pa_features = [
            "Area", "PercentArea", "Mean", "StdDev", "Mode", "Min", "Max",
            "X", "Y", "XM", "YM", "Perim.", "BX", "BY", "Width", "Height",
            "Major", "Minor", "Angle", "Circ.", "Feret", "IntDen", "Median",
            "Skew", "Kurt", "RawIntDen", "FeretX", "FeretY", "FeretAngle",
            "MinFeret", "AR", "Round", "Solidity"
        ]

        featureNames = {}
        featureShortNames = {}
        featureDimensions = {}
        isInt = {}
        for feature in pa_features:
            featureNames[feature] = feature
            featureShortNames[feature] = feature
            featureDimensions[feature] = Dimension.STRING
            isInt[feature] = False

        model.getFeatureModel().declareSpotFeatures(pa_features, featureNames,
                                                    featureShortNames,
                                                    featureDimensions, isInt)

        #------------------------
        # Prepare settings object
        #------------------------

        settings = Settings()
        settings.setFrom(imp)

        dbgPath = os.path.join(wellPath, 'debugImages_' + chanName)
        if not os.path.exists(dbgPath):
            os.makedirs(dbgPath)

        if cfg.hasValue(ELMConfig.thresholdMethod):
            threshMethod = cfg.getValue(ELMConfig.thresholdMethod)
        else:
            threshMethod = "Default"

        # Configure detector - we use the Strings for the keys.
        settings.detectorFactory = ThresholdDetectorFactory()
        settings.detectorSettings = {
            'THRESHOLD': computedThresh,
            'ABOVE': True,
            'DEBUG_MODE': True,
            'DEBUG_OUTPATH': dbgPath,
            'THRESHOLD_METHOD': threshMethod
        }

        #settings.detectorFactory = LocalThresholdDetectorFactory()
        #settings.detectorSettings = {
        #    'THRESHOLD': computedThresh,
        #    'DEBUG_MODE': True,
        #    'DEBUG_OUTPATH': dbgPath
        #}

        # Configure spot filters - classical filter on quality.
        filter1 = FeatureFilter('QUALITY', 150, True)
        settings.addSpotFilter(filter1)

        # Configure tracker - we want to allow merges and fusions.
        settings.trackerFactory = SparseLAPTrackerFactory()
        settings.trackerSettings = LAPUtils.getDefaultLAPSettingsMap()  # almost good enough

        # Linking
        settings.trackerSettings[TrackerKeys.KEY_LINKING_MAX_DISTANCE] = 220.0  # in pixels
        linkFeaturePenalties = HashMap()
        linkFeaturePenalties['Area'] = 1.0
        linkFeaturePenalties['POSITION_X'] = 1.0
        linkFeaturePenalties['POSITION_Y'] = 1.0
        #linkFeaturePenalties['Circ.'] = 1.0
        #linkFeaturePenalties['Mean'] = 1.0
        settings.trackerSettings[TrackerKeys.KEY_LINKING_FEATURE_PENALTIES] = linkFeaturePenalties

        # Gap closing
        settings.trackerSettings[TrackerKeys.KEY_ALLOW_GAP_CLOSING] = True
        settings.trackerSettings[TrackerKeys.KEY_GAP_CLOSING_MAX_FRAME_GAP] = 8
        settings.trackerSettings[TrackerKeys.KEY_GAP_CLOSING_MAX_DISTANCE] = 120.0  # in pixels
        #settings.trackerSettings[TrackerKeys.KEY_GAP_CLOSING_FEATURE_PENALTIES] = new HashMap<>(DEFAULT_GAP_CLOSING_FEATURE_PENALTIES));

        # Track splitting
        settings.trackerSettings[TrackerKeys.KEY_ALLOW_TRACK_SPLITTING] = False
        settings.trackerSettings[TrackerKeys.KEY_SPLITTING_MAX_DISTANCE] = 45.0  # in pixels
        #settings.trackerSettings[TrackerKeys.KEY_SPLITTING_FEATURE_PENALTIES] = new HashMap<>(DEFAULT_SPLITTING_FEATURE_PENALTIES));

        # Track merging
        settings.trackerSettings[TrackerKeys.KEY_ALLOW_TRACK_MERGING] = True
        settings.trackerSettings[TrackerKeys.KEY_MERGING_MAX_DISTANCE] = 45.0  # in pixels
        #settings.trackerSettings[TrackerKeys.KEY_MERGING_FEATURE_PENALTIES] = new HashMap<>(DEFAULT_MERGING_FEATURE_PENALTIES));

        # Others
        settings.trackerSettings[TrackerKeys.KEY_BLOCKING_VALUE] = float("inf")
        settings.trackerSettings[TrackerKeys.KEY_ALTERNATIVE_LINKING_COST_FACTOR] = 1.05
        settings.trackerSettings[TrackerKeys.KEY_CUTOFF_PERCENTILE] = 0.9

        # Configure track analyzers - later on we want to filter out tracks
        # based on their displacement, so we need to state that we want
        # track displacement to be calculated. By default, out of the GUI,
        # no features are calculated.
        # The displacement feature is provided by the TrackDurationAnalyzer.
        settings.addTrackAnalyzer(TrackDurationAnalyzer())
        settings.addTrackAnalyzer(TrackBranchingAnalyzer())
        settings.addTrackAnalyzer(TrackIndexAnalyzer())
        settings.addTrackAnalyzer(TrackLocationAnalyzer())
        settings.addTrackAnalyzer(TrackSpeedStatisticsAnalyzer())

        settings.addSpotAnalyzerFactory(SpotIntensityAnalyzerFactory())
        settings.addSpotAnalyzerFactory(SpotContrastAndSNRAnalyzerFactory())

        # Configure track filters - we want to get rid of the two immobile spots at
        # the bottom right of the image. Track displacement must be above 10 pixels.
        #filter2 = FeatureFilter('TRACK_DISPLACEMENT', 1, True)
        #settings.addTrackFilter(filter2)
        #filter2 = FeatureFilter('TRACK_DISPLACEMENT', 1, True)
        #settings.addTrackFilter(filter2)

        #print("Spot feature analyzers: " + settings.toStringFeatureAnalyzersInfo())

        #-------------------
        # Instantiate plugin
        #-------------------

        trackmate = TrackMate(model, settings)
        trackmate.setNumThreads(1)

        #--------
        # Process
        #--------

        ok = trackmate.checkInput()
        if not ok:
            sys.exit(str(trackmate.getErrorMessage()))

        print("Processing " + chanName + "...")
        ok = trackmate.process()
        if not ok:
            sys.exit(str(trackmate.getErrorMessage()))

        #----------------
        # Display results
        #----------------

        print("Rendering...")

        # Set spot names based on track IDs.
        # This allows track IDs to be displayed in the rendered video.
        for tId in model.getTrackModel().trackIDs(True):
            trackSpots = model.getTrackModel().trackSpots(tId)
            for spot in trackSpots:
                spot.setName(str(tId))

        # Determine sub-tracks within a track.
        # Since tracks can merge, we want to keep track of which track a spot is
        # in prior to the merge.
        spotToSubTrackMap = {}
        spotIt = model.getSpots().iterator(False)
        trackModel = model.getTrackModel()
        subTrackCount = {}
        while spotIt.hasNext():
            spot = spotIt.next()
            spotEdges = trackModel.edgesOf(spot)
            # Find merge points within a track: ignore spots with fewer than 2 edges.
            if (len(spotEdges) < 2):
                continue

            # We have a merge if we have multiple incoming edges.
            incomingEdges = 0
            edgeIt = spotEdges.iterator()
            ancestorSpots = []
            while edgeIt.hasNext():
                edge = edgeIt.next()
                src = trackModel.getEdgeSource(edge)
                dst = trackModel.getEdgeTarget(edge)
                if dst.ID() == spot.ID():
                    ancestorSpots.append(src)
                    incomingEdges += 1
            # Ignore non-merges.
            if incomingEdges < 2:
                continue

            trackId = trackModel.trackIDOf(spot)
            if trackId in subTrackCount:
                subTrackId = subTrackCount[trackId]
            else:
                subTrackId = 1
            for ancestorSpot in ancestorSpots:
                labelSubTrackAncestors(trackModel, spotToSubTrackMap, ancestorSpot,
                                       subTrackId, trackId, False)
                subTrackId += 1
            subTrackCount[trackId] = subTrackId

        # Spots after the last merge still need to be labeled.
        for tId in trackModel.trackIDs(True):
            trackSpots = trackModel.trackSpots(tId)
            spotIt = trackSpots.iterator()
            lastSpot = None
            while spotIt.hasNext():
                spot = spotIt.next()
                outgoingEdges = 0
                spotEdges = trackModel.edgesOf(spot)
                edgeIt = spotEdges.iterator()
                while edgeIt.hasNext():
                    edge = edgeIt.next()
                    src = trackModel.getEdgeSource(edge)
                    dst = trackModel.getEdgeTarget(edge)
                    if src.ID() == spot.ID():
                        outgoingEdges += 1
                if outgoingEdges == 0 and len(spotEdges) > 0:
                    lastSpot = spot

            if tId in subTrackCount:
                subTrackId = subTrackCount[tId]
            else:
                subTrackId = 1
            if not lastSpot == None:
                labelSubTrackAncestors(trackModel, spotToSubTrackMap, lastSpot,
                                       subTrackId, tId, True)

        # Create output file.
        trackOut = os.path.join(wellPath, chanName + "_spotToTrackMap.csv")
        trackFile = open(trackOut, 'w')
        trackFile.write('Spot Id, Track Sub Id, Track Id, Frame \n')
        for spotId in spotToSubTrackMap:
            trackFile.write(str(spotId) + ', ' + ','.join(spotToSubTrackMap[spotId]) + '\n')
        trackFile.close()

        # Write edge set.
        trackOut = os.path.join(wellPath, chanName + "_mergeEdgeSet.csv")
        trackFile = open(trackOut, 'w')
        trackFile.write('Track Id, Spot Id, Spot Id \n')
        edgeIt = trackModel.edgeSet().iterator()
        while edgeIt.hasNext():
            edge = edgeIt.next()
            src = trackModel.getEdgeSource(edge)
            dst = trackModel.getEdgeTarget(edge)
            trackId = trackModel.trackIDOf(edge)
            srcSubTrack = spotToSubTrackMap[src.ID()][0]
            dstSubTrack = spotToSubTrackMap[dst.ID()][0]
            if not srcSubTrack == dstSubTrack:
                trackFile.write(str(trackId) + ', ' + str(src.ID()) + ', ' + str(dst.ID()) + '\n')
        trackFile.close()

        selectionModel = SelectionModel(model)
        displayer = HyperStackDisplayer(model, selectionModel, impColor)
        displayer.setDisplaySettings(
            TrackMateModelView.KEY_TRACK_COLORING,
            PerTrackFeatureColorGenerator(model, TrackIndexAnalyzer.TRACK_INDEX))
        displayer.setDisplaySettings(
            TrackMateModelView.KEY_SPOT_COLORING,
            SpotColorGeneratorPerTrackFeature(model, TrackIndexAnalyzer.TRACK_INDEX))
        displayer.setDisplaySettings(TrackMateModelView.KEY_DISPLAY_SPOT_NAMES, True)
        displayer.setDisplaySettings(
            TrackMateModelView.KEY_TRACK_DISPLAY_MODE,
            TrackMateModelView.TRACK_DISPLAY_MODE_LOCAL_BACKWARD_QUICK)
        displayer.setDisplaySettings(TrackMateModelView.KEY_TRACK_DISPLAY_DEPTH, 2)
        displayer.render()
        displayer.refresh()

        trackmate.getSettings().imp = impColor
        coa = CaptureOverlayAction(None)
        coa.execute(trackmate)
        WindowManager.setTempCurrentImage(coa.getCapture())
        IJ.saveAs('avi', os.path.join(wellPath, chanName + "_out.avi"))

        imp.close()
        impColor.close()
        displayer.clear()
        displayer.getImp().hide()
        displayer.getImp().close()
        coa.getCapture().hide()
        coa.getCapture().close()

        # Echo results with the logger we set at start:
        model.getLogger().log(str(model))

        # The feature model, that stores edge and track features.
        fm = model.getFeatureModel()

        # Write output for tracks.
        numTracks = model.getTrackModel().trackIDs(True).size()
        print("Writing track data for " + str(numTracks) + " tracks.")
        trackDat = {}
        for tId in model.getTrackModel().trackIDs(True):
            track = model.getTrackModel().trackSpots(tId)

            # Ensure track spots dir exists.
            trackOut = os.path.join(wellPath, chanName + "_track_spots")
            if not os.path.exists(trackOut):
                os.makedirs(trackOut)
            # Create output file.
            trackOut = os.path.join(trackOut, "track_" + str(tId) + ".csv")
            trackFile = open(trackOut, 'w')

            # Write header.
            header = 'Name, ID, Frame, '
            for feature in track.toArray()[0].getFeatures().keySet():
                if feature == 'Frame':
                    continue
                header += feature + ", "
            header = header[0:len(header) - 2]
            header += '\n'
            trackFile.write(header)

            # Write spot data.
            avgTotalIntensity = 0
            for spot in track:
                #print(spot.echo())
                data = [spot.getName(), str(spot.ID()), str(spot.getFeature('FRAME'))]
                for feature in spot.getFeatures():
                    if feature == 'Frame':
                        continue
                    elif feature == 'TOTAL_INTENSITY':
                        avgTotalIntensity += spot.getFeature(feature)
                    data.append(str(spot.getFeature(feature)))
                trackFile.write(','.join(data) + '\n')
            trackFile.close()
            avgTotalIntensity /= len(track)

            # Write out track stats.
            # Make sure dir exists.
            trackOut = os.path.join(wellPath, chanName + "_tracks")
            if not os.path.exists(trackOut):
                os.makedirs(trackOut)
            # Create output file.
            trackOut = os.path.join(trackOut, "track_" + str(tId) + ".csv")
            trackFile = open(trackOut, 'w')

            # Fetch the track features from the feature model.
            header = ''
            for featName in fm.getTrackFeatureNames():
                header += featName + ", "
            header = header[0:len(header) - 2]
            header += '\n'
            trackFile.write(header)

            features = ''
            for featName in fm.getTrackFeatureNames():
                features += str(fm.getTrackFeature(tId, featName)) + ', '
            features = features[0:len(features) - 2]
            features += '\n'
            trackFile.write(features)
            trackFile.write('\n')
            trackFile.close()

            trackDat[tId] = [
                str(tId),
                str(fm.getTrackFeature(tId, 'TRACK_DURATION')),
                str(avgTotalIntensity),
                str(fm.getTrackFeature(tId, 'TRACK_START')),
                str(fm.getTrackFeature(tId, 'TRACK_STOP'))
            ]

        # Create track summary output file.
        trackOut = os.path.join(wellPath, chanName + "_trackSummary.csv")
        trackFile = open(trackOut, 'w')
        trackFile.write('Track Id, Duration, Avg Total Intensity, Start Frame, Stop Frame \n')
        for track in trackDat:
            trackFile.write(','.join(trackDat[track]) + '\n')
        trackFile.close()

        trackOut = os.path.join(wellPath, chanName + "_trackModel.xml")
        trackFile = File(trackOut)
        writer = TmXmlWriter(trackFile, model.getLogger())
        #writer.appendLog( logPanel.getTextContent() );
        writer.appendModel(trackmate.getModel())
        writer.appendSettings(trackmate.getSettings())
        #writer.appendGUIState( controller.getGuimodel() );
        writer.writeToFile()

        model.clearSpots(True)
        model.clearTracks(True)

    return trackDat

def run_trackmate(imp, path, filename, params, batch_mode=False):
    # Initialize TrackMate model.
    model = Model()

    # Set logger - use to see outputs, not needed in batch mode.
    model.setLogger(Logger.IJ_LOGGER)

    # Create settings object from image.
    settings = Settings()
    settings.setFrom(imp)

    cal = imp.getCalibration()
    model.setPhysicalUnits("micron", "sec")

    # Configure detector.
    settings.detectorFactory = LogDetectorFactory()
    # settings.detectorFactory = DogDetectorFactory()
    settings.detectorSettings = {
        'DO_SUBPIXEL_LOCALIZATION': params.do_subpixel_localization,
        'RADIUS': params.radius,
        'TARGET_CHANNEL': 0,
        'THRESHOLD': params.threshold,
        'DO_MEDIAN_FILTERING': params.do_median_filtering,
    }
    # print(params)

    # Add spot filters.
    filter_quality = FeatureFilter('QUALITY', params.quality, True)
    settings.addSpotFilter(filter_quality)
    filter_snr = FeatureFilter('SNR', params.snr, True)
    settings.addSpotFilter(filter_snr)

    # Compute spot features.
    settings.addSpotAnalyzerFactory(SpotIntensityAnalyzerFactory())
    settings.addSpotAnalyzerFactory(SpotContrastAndSNRAnalyzerFactory())

    # Compute track features.
    settings.addTrackAnalyzer(TrackBranchingAnalyzer())
    settings.addTrackAnalyzer(TrackDurationAnalyzer())
    settings.addTrackAnalyzer(TrackIndexAnalyzer())
    settings.addTrackAnalyzer(TrackLocationAnalyzer())
    settings.addTrackAnalyzer(TrackSpeedStatisticsAnalyzer())
    settings.addTrackAnalyzer(TrackSpotQualityFeatureAnalyzer())

    # Update model.
    ModelFeatureUpdater(model, settings)

    # Configure tracker.
    settings.trackerFactory = SparseLAPTrackerFactory()
    settings.trackerSettings = LAPUtils.getDefaultLAPSettingsMap()
    settings.trackerSettings['LINKING_MAX_DISTANCE'] = params.linking_max_distance
    settings.trackerSettings['GAP_CLOSING_MAX_DISTANCE'] = params.gap_closing_max_distance
    settings.trackerSettings['MAX_FRAME_GAP'] = params.max_frame_gap

    # Add track filters.
    filter_T1 = FeatureFilter('TRACK_DURATION', params.track_duration, True)
    filter_MTD = FeatureFilter('TRACK_DISPLACEMENT', params.track_displacement, True)
    settings.addTrackFilter(filter_T1)
    settings.addTrackFilter(filter_MTD)

    # Instantiate trackmate.
    trackmate = TrackMate(model, settings)

    # Execute all.
    ok = trackmate.checkInput()
    if not ok:
        IJ.showMessage("No spots found... Adjust detection parameter.\n" +
                       str(trackmate.getErrorMessage()))
        sys.exit(str(trackmate.getErrorMessage()))

    ok = trackmate.process()
    if not ok:
        IJ.showMessage("No spots found... Adjust detection parameter.\n" +
                       str(trackmate.getErrorMessage()))
        sys.exit(str(trackmate.getErrorMessage()))

    filename = os.path.splitext(filename)[0]  # filename without extension
    outFile = File(os.path.join(path, filename + "_Tracks.xml"))
    ExportTracksToXML.export(model, settings, outFile)
    #imp.close()

    tm_writer = TmXmlWriter(File(os.path.join(path, filename + "_TM.xml")))
    tm_writer.appendModel(model)
    tm_writer.appendSettings(settings)
    tm_writer.writeToFile()

    if not batch_mode:
        selectionModel = SelectionModel(model)
        displayer = HyperStackDisplayer(model, selectionModel, imp)
        displayer.render()
        displayer.refresh()

    # Echo results with the logger we set at start:
    model.getLogger().log(str(model))

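# --- Usage sketch (illustrative, not from the original script) ---
# run_trackmate() expects a params object exposing the attributes referenced
# above. A hypothetical, minimal way to provide them from a driver script;
# all values and paths below are placeholders to be tuned for your data.
from collections import namedtuple
from ij import IJ

TrackingParams = namedtuple('TrackingParams', [
    'do_subpixel_localization', 'radius', 'threshold', 'do_median_filtering',
    'quality', 'snr', 'linking_max_distance', 'gap_closing_max_distance',
    'max_frame_gap', 'track_duration', 'track_displacement'
])

params = TrackingParams(
    do_subpixel_localization=True, radius=2.5, threshold=1.0,
    do_median_filtering=False, quality=10.0, snr=0.5,
    linking_max_distance=10.0, gap_closing_max_distance=10.0,
    max_frame_gap=3, track_duration=5.0, track_displacement=5.0)

imp = IJ.getImage()  # currently active time-lapse image
run_trackmate(imp, "/path/to/output", "movie.tif", params, batch_mode=True)  # hypothetical path and name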