#-------------------------
# Instantiate model object
#-------------------------
model = Model()
# Set logger: log to the ImageJ log window.
model.setLogger(Logger.IJ_LOGGER)

#------------------------
# Prepare settings object
#------------------------
settings = Settings()
# Copy image dimensions/calibration from the current ImagePlus.
settings.setFrom(imp)

# Configure detector: Difference-of-Gaussians spot detection.
settings.detectorFactory = DogDetectorFactory()
# BUG FIX: the original wrapped this mapping in doubled braces ({{ ... }}),
# which builds a *set* containing a dict and raises
# "TypeError: unhashable type: 'dict'" at runtime -- almost certainly a
# leftover str.format escape. TrackMate expects detectorSettings to be a
# plain dict (Java Map).
settings.detectorSettings = {
    DetectorKeys.KEY_DO_SUBPIXEL_LOCALIZATION: True,
    DetectorKeys.KEY_RADIUS: 2.5,           # spot radius in calibrated units
    DetectorKeys.KEY_TARGET_CHANNEL: 1,
    DetectorKeys.KEY_THRESHOLD: 5.,         # quality cutoff
    DetectorKeys.KEY_DO_MEDIAN_FILTERING: False,
}

# Configure tracker: sparse LAP tracker with default settings, then tighten
# the linking / gap-closing distances and the maximum frame gap.
settings.trackerFactory = SparseLAPTrackerFactory()
settings.trackerSettings = LAPUtils.getDefaultLAPSettingsMap()
settings.trackerSettings['LINKING_MAX_DISTANCE'] = 10.0
settings.trackerSettings['GAP_CLOSING_MAX_DISTANCE'] = 10.0
settings.trackerSettings['MAX_FRAME_GAP'] = 3
def run_trackmate(
    imp, path_out="./", detector="log", radius=2.5, threshold=0.0, median_filter=False
):
    """Log Trackmate detection run with given parameters.

    Saves spots in a csv file in the given path_out with encoded parameters
    (basename + radius) in the filename.

    Args:
        imp: ImagePlus to be processed.
        path_out: Output directory to save files.
        detector: Type of detection method. Options are 'log', 'dog'.
        radius: Radius of spots in pixels.
        threshold: Threshold value to filter spots.
        median_filter: True if median_filtering should be used.

    Returns:
        None. Results are written to disk; returns early if detection fails.

    Raises:
        ValueError: If the image's third dimension is not 1.
    """
    # Only single-channel images are supported (dimensions[2] is nChannels).
    if imp.dimensions[2] != 1:
        # BUG FIX: the original concatenated a str with an int here, which
        # raised a TypeError instead of the intended ValueError.
        raise ValueError(
            "Imp's dimensions must be [n, n, 1] but are " + str(imp.dimensions[2])
        )

    # Create the model object now; silent logger keeps batch runs quiet.
    model = Model()
    model.setLogger(Logger.VOID_LOGGER)

    # Prepare settings object from the image (dimensions, calibration, ...).
    settings = Settings()
    settings.setFrom(imp)

    # Configure detector: DoG when requested, LoG otherwise.
    settings.detectorFactory = (
        DogDetectorFactory() if detector == "dog" else LogDetectorFactory()
    )
    settings.detectorSettings = {
        "DO_SUBPIXEL_LOCALIZATION": True,
        "RADIUS": radius,
        "TARGET_CHANNEL": 1,
        "THRESHOLD": threshold,
        "DO_MEDIAN_FILTERING": median_filter,
    }

    # Instantiate plugin and run only the detection step (no tracking).
    trackmate = TrackMate(model, settings)
    output = trackmate.execDetection()
    if not output:
        # Surface TrackMate's own diagnostic instead of a bare marker string.
        print("error process: " + str(trackmate.getErrorMessage()))
        return None

    # Get output from a single image.
    # iterator(0) ~ iterator(False): iterate all spots, not only visible
    # ones -- presumably; confirm against the TrackMate SpotCollection API.
    fname = str(imp.title)
    spots = [["fname", "detector", "radius", "threshold", "median", "x", "y", "q"]]
    for spot in model.spots.iterator(0):
        x = spot.getFeature("POSITION_X")
        y = spot.getFeature("POSITION_Y")
        q = spot.getFeature("QUALITY")
        spots.append([fname, detector, radius, threshold, median_filter, x, y, q])

    # Save output. NOTE: "wb" is the correct csv.writer mode on the
    # Jython/Python-2 runtime Fiji scripts run under.
    outname = os.path.splitext(os.path.basename(fname))[0] + "_" + str(radius) + ".csv"
    with open(os.path.join(path_out, outname), "wb") as f:
        wr = csv.writer(f)
        for row in spots:
            wr.writerow(row)
def runTrackMate(imp):
    """Detect and track spots in imp with TrackMate, with manual curation.

    Runs DoG detection plus sparse LAP tracking, renders the results on the
    image and in TrackScheme, then blocks on a WaitForUserDialog so the user
    can delete incorrect spots / edit tracks before data is collected.

    Args:
        imp: ImagePlus to analyze.

    Returns:
        Tuple (crds_perSpot, nFrames): crds_perSpot is a list with one dict
        per track mapping frame -> (x, y); nFrames is imp's frame count.
        Returns ([], nFrames) if the user cancels with ESC.
    """
    # Function-scope Java imports: this is a Fiji/Jython script, so the
    # TrackMate classes are resolved from the Fiji classpath at call time.
    import fiji.plugin.trackmate.Settings as Settings
    import fiji.plugin.trackmate.Model as Model
    import fiji.plugin.trackmate.SelectionModel as SelectionModel
    import fiji.plugin.trackmate.TrackMate as TrackMate
    import fiji.plugin.trackmate.Logger as Logger
    import fiji.plugin.trackmate.detection.DetectorKeys as DetectorKeys
    import fiji.plugin.trackmate.detection.DogDetectorFactory as DogDetectorFactory
    import fiji.plugin.trackmate.tracking.sparselap.SparseLAPTrackerFactory as SparseLAPTrackerFactory
    import fiji.plugin.trackmate.tracking.LAPUtils as LAPUtils
    import fiji.plugin.trackmate.visualization.hyperstack.HyperStackDisplayer as HyperStackDisplayer
    import fiji.plugin.trackmate.features.FeatureFilter as FeatureFilter
    import fiji.plugin.trackmate.features.FeatureAnalyzer as FeatureAnalyzer
    import fiji.plugin.trackmate.features.spot.SpotContrastAndSNRAnalyzerFactory as SpotContrastAndSNRAnalyzerFactory
    import fiji.plugin.trackmate.action.ExportStatsToIJAction as ExportStatsToIJAction
    import fiji.plugin.trackmate.io.TmXmlReader as TmXmlReader
    import fiji.plugin.trackmate.action.ExportTracksToXML as ExportTracksToXML
    import fiji.plugin.trackmate.io.TmXmlWriter as TmXmlWriter
    import fiji.plugin.trackmate.features.ModelFeatureUpdater as ModelFeatureUpdater
    import fiji.plugin.trackmate.features.SpotFeatureCalculator as SpotFeatureCalculator
    import fiji.plugin.trackmate.features.spot.SpotContrastAndSNRAnalyzer as SpotContrastAndSNRAnalyzer
    import fiji.plugin.trackmate.features.spot.SpotIntensityAnalyzerFactory as SpotIntensityAnalyzerFactory
    import fiji.plugin.trackmate.features.track.TrackSpeedStatisticsAnalyzer as TrackSpeedStatisticsAnalyzer
    import fiji.plugin.trackmate.util.TMUtils as TMUtils
    import fiji.plugin.trackmate.visualization.trackscheme.TrackScheme as TrackScheme
    import fiji.plugin.trackmate.visualization.PerTrackFeatureColorGenerator as PerTrackFeatureColorGenerator

    #-------------------------
    # Instantiate model object
    #-------------------------
    nFrames = imp.getNFrames()
    model = Model()

    # Set logger
    #model.setLogger(Logger.IJ_LOGGER)

    #------------------------
    # Prepare settings object
    #------------------------
    settings = Settings()
    settings.setFrom(imp)

    # Configure detector
    # NOTE(review): radius 12.30 and threshold 100. look tuned for a specific
    # dataset -- confirm before reusing on other images.
    settings.detectorFactory = DogDetectorFactory()
    settings.detectorSettings = {
        DetectorKeys.KEY_DO_SUBPIXEL_LOCALIZATION: True,
        DetectorKeys.KEY_RADIUS: 12.30,
        DetectorKeys.KEY_TARGET_CHANNEL: 1,
        DetectorKeys.KEY_THRESHOLD: 100.,
        DetectorKeys.KEY_DO_MEDIAN_FILTERING: False,
    }

    # Configure tracker
    settings.trackerFactory = SparseLAPTrackerFactory()
    settings.trackerSettings = LAPUtils.getDefaultLAPSettingsMap()
    settings.trackerSettings['LINKING_MAX_DISTANCE'] = 10.0
    settings.trackerSettings['GAP_CLOSING_MAX_DISTANCE'] = 10.0
    settings.trackerSettings['MAX_FRAME_GAP'] = 3

    # Add the analyzers for some spot features.
    # You need to configure TrackMate with analyzers that will generate
    # the data you need.
    # Here we just add two analyzers for spot, one that computes generic
    # pixel intensity statistics (mean, max, etc...) and one that computes
    # an estimate of each spot's SNR.
    # The trick here is that the second one requires the first one to be in
    # place. Be aware of this kind of gotchas, and read the docs.
    settings.addSpotAnalyzerFactory(SpotIntensityAnalyzerFactory())
    settings.addSpotAnalyzerFactory(SpotContrastAndSNRAnalyzerFactory())

    # Add an analyzer for some track features, such as the track mean speed.
    settings.addTrackAnalyzer(TrackSpeedStatisticsAnalyzer())

    settings.initialSpotFilterValue = 1

    print(str(settings))

    #----------------------
    # Instantiate trackmate
    #----------------------
    trackmate = TrackMate(model, settings)

    #------------
    # Execute all
    #------------
    # sys.exit on failure: aborts the whole script with TrackMate's message.
    ok = trackmate.checkInput()
    if not ok:
        sys.exit(str(trackmate.getErrorMessage()))

    ok = trackmate.process()
    if not ok:
        sys.exit(str(trackmate.getErrorMessage()))

    #----------------
    # Display results
    #----------------
    selectionModel = SelectionModel(model)
    displayer = HyperStackDisplayer(model, selectionModel, imp)
    displayer.render()
    displayer.refresh()

    #---------------------
    # Select correct spots
    #---------------------
    # Prepare display.
    sm = SelectionModel(model)
    color = PerTrackFeatureColorGenerator(model, 'TRACK_INDEX')

    # launch TrackScheme to select spots and tracks
    trackscheme = TrackScheme(model, sm)
    trackscheme.setDisplaySettings('TrackColoring', color)
    trackscheme.render()

    # Update image with TrackScheme commands
    view = HyperStackDisplayer(model, sm, imp)
    view.setDisplaySettings('TrackColoring', color)
    view.render()

    # Wait for the user to select correct spots and tracks before collecting data
    dialog = WaitForUserDialog(
        "Spots",
        "Delete incorrect spots and edit tracks if necessary. (Press ESC to cancel analysis)"
    )
    dialog.show()
    # ESC cancels the analysis: clear the overlay, close the image, return empty.
    if dialog.escPressed():
        IJ.run("Remove Overlay", "")
        imp.close()
        return ([], nFrames)

    # The feature model, that stores edge and track features.
    #model.getLogger().log('Found ' + str(model.getTrackModel().nTracks(True)) + ' tracks.')
    fm = model.getFeatureModel()
    crds_perSpot = []

    # Iterate only visible (filtered) tracks. NOTE(review): `id` shadows the
    # builtin; `trackID` below is computed but never used.
    for id in model.getTrackModel().trackIDs(True):
        # Fetch the track feature from the feature model.(remove """ to enable)
        """v = fm.getTrackFeature(id, 'TRACK_MEAN_SPEED')
        model.getLogger().log('')
        model.getLogger().log('Track ' + str(id) + ': mean velocity = ' + str(v) + ' ' + model.getSpaceUnits() + '/' + model.getTimeUnits())"""
        trackID = str(id)
        track = model.getTrackModel().trackSpots(id)
        spot_track = {}
        for spot in track:
            sid = spot.ID()
            # Fetch spot features directly from spot.
            x = spot.getFeature('POSITION_X')
            y = spot.getFeature('POSITION_Y')
            t = spot.getFeature('FRAME')
            q = spot.getFeature('QUALITY')
            snr = spot.getFeature('SNR')
            mean = spot.getFeature('MEAN_INTENSITY')
            #model.getLogger().log('\tspot ID = ' + str(sid) + ', x='+str(x)+', y='+str(y)+', t='+str(t)+', q='+str(q) + ', snr='+str(snr) + ', mean = ' + str(mean))
            # Keyed by frame: a track contributes at most one (x, y) per frame.
            spot_track[t] = (x, y)
        crds_perSpot.append(spot_track)
        #print ("Spot", crds_perSpot.index(spot_track),"has the following coordinates:", crds_perSpot[crds_perSpot.index(spot_track)])

    return (crds_perSpot, nFrames)
def getSpots(imp, channel, detector_type, radius, threshold, overlay,
             roi_type="large", roi_color=ColorRGB("blue")):
    """
    Performs the detection, adding spots to the image overlay

    :imp: The image (ImagePlus) being analyzed
    :channel: The target channel
    :detector_type: A string describing the detector: "LoG" or "DoG"
    :radius: Spot radius (NB: trackmate GUI accepts diameter)
    :threshold: Quality cutoff value
    :overlay: The image overlay to store spot (MultiPoint) ROIs
    :roi_type: A string describing how spot ROIs should be displayed
    :returns: The n. of detected spots
    """
    # Build detection-only settings from the image.
    settings = Settings()
    settings.setFrom(imp)
    settings.detectorFactory = (LogDetectorFactory() if "LoG" in detector_type
                                else DogDetectorFactory())
    settings.detectorSettings = {
        DK.KEY_DO_SUBPIXEL_LOCALIZATION: False,
        DK.KEY_DO_MEDIAN_FILTERING: True,
        DK.KEY_TARGET_CHANNEL: channel,
        DK.KEY_RADIUS: radius,
        DK.KEY_THRESHOLD: threshold,
    }
    # Detection step only -- no tracking. `lservice` is presumably an ImageJ2
    # LogService defined at module level; confirm against the enclosing file.
    trackmate = TrackMate(settings)
    if not trackmate.execDetection():
        lservice.error(str(trackmate.getErrorMessage()))
        return 0
    model = trackmate.model
    spots = model.getSpots()
    # getNSpots(False): count all spots, not only visible ones.
    count = spots.getNSpots(False)
    ch_id = "Spots Ch%d" % channel
    if count > 0:
        roi = None
        cal = imp.getCalibration()
        # Only the current frame is analyzed; warn if it is not the first.
        t_pos = imp.getT()
        if (t_pos > 1):
            lservice.warn("Only frame %d was considered..." % t_pos)
        for spot in spots.iterable(False):
            # Convert calibrated spot coordinates back to raw pixel coords.
            x = cal.getRawX(spot.getFeature(spot.POSITION_X))
            y = cal.getRawY(spot.getFeature(spot.POSITION_Y))
            z = spot.getFeature(spot.POSITION_Z)
            # Map calibrated z to a 1-based slice index; guard against a
            # missing/zero pixelDepth.
            if z == 0 or not cal.pixelDepth or cal.pixelDepth == 0:
                z = 1
            else:
                z = int(z // cal.pixelDepth)
            imp.setPosition(channel, z, t_pos)
            if roi is None:
                # NOTE(review): first point is added as int(x), int(y) while
                # later points are added as floats -- confirm this asymmetry
                # is intended (PointRoi constructor vs addPoint signatures).
                roi = PointRoi(int(x), int(y), imp)
            else:
                roi.addPoint(imp, x, y)
        # Style the multi-point ROI and attach it to the overlay under a
        # channel-specific label. colorRGBtoColor is a module-level helper.
        roi.setStrokeColor(colorRGBtoColor(roi_color))
        if "large" in roi_type:
            roi.setPointType(3)
            roi.setSize(4)
        else:
            roi.setPointType(2)
            roi.setSize(1)
        overlay.add(roi, ch_id)
    return count