def trackmate(self):
    calibration = self.imp.getCalibration()

    model = Model()
    model.setLogger(Logger.IJ_LOGGER)

    settings = Settings()
    settings.setFrom(self.imp)

    # Configure detector - We use the Strings for the keys
    settings.detectorFactory = LogDetectorFactory()
    settings.detectorSettings = {
        'DO_SUBPIXEL_LOCALIZATION': True,
        'RADIUS': calibration.getX(self.trackmateSize),
        'TARGET_CHANNEL': 1,
        'THRESHOLD': self.trackmateThreashold,
        'DO_MEDIAN_FILTERING': True,
    }

    # Configure spot filters - Classical filter on quality
    filter1 = FeatureFilter('QUALITY', 0.01, True)
    settings.addSpotFilter(filter1)
    settings.addSpotAnalyzerFactory(SpotIntensityMultiCAnalyzerFactory())
    settings.initialSpotFilterValue = 1

    # Configure tracker - We want to allow merges and fusions
    settings.trackerFactory = SparseLAPTrackerFactory()
    settings.trackerSettings = LAPUtils.getDefaultLAPSettingsMap()

    trackmate = TrackMate(model, settings)

    #--------
    # Process
    #--------
    ok = trackmate.checkInput()
    if not ok:
        print("NOT OK")

    ok = trackmate.process()
    if not ok:
        print("NOT OK")

    #----------------
    # Display results
    #----------------
    #selectionModel = SelectionModel(model)
    #displayer = HyperStackDisplayer(model, selectionModel, self.imp)
    #displayer.render()
    #displayer.refresh()

    # Echo results with the logger we set at start:
    spots = model.getSpots()
    return spots.iterable(True)
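# Hedged usage sketch (an addition, not part of the original script): one way the
# spots returned by trackmate(self) above might be consumed. 'detector' is a
# hypothetical instance of the class that defines trackmate(), self.imp,
# self.trackmateSize and self.trackmateThreashold.
def example_print_spots(detector):
    # Iterate the visible spots and log their position and quality.
    for spot in detector.trackmate():
        x = spot.getFeature('POSITION_X')
        y = spot.getFeature('POSITION_Y')
        q = spot.getFeature('QUALITY')
        IJ.log('Spot %d at (%.1f, %.1f), quality %.1f' % (spot.ID(), x, y, q))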
def detection(imp, c):
    cal = imp.getCalibration()

    model = Model()
    settings = Settings()
    settings.setFrom(imp)

    # Configure detector - Manually determined as best
    settings.detectorFactory = LogDetectorFactory()
    settings.detectorSettings = {
        'DO_SUBPIXEL_LOCALIZATION': True,
        'RADIUS': 2.0,
        'TARGET_CHANNEL': c,
        'THRESHOLD': 20.0,
        'DO_MEDIAN_FILTERING': False,
    }

    settings.addSpotAnalyzerFactory(SpotIntensityAnalyzerFactory())
    settings.addSpotAnalyzerFactory(SpotContrastAndSNRAnalyzerFactory())

    settings.trackerFactory = SparseLAPTrackerFactory()
    settings.trackerSettings = LAPUtils.getDefaultLAPSettingsMap()

    trackmate = TrackMate(model, settings)

    ok = trackmate.checkInput()
    if not ok:
        sys.exit(str(trackmate.getErrorMessage()))

    try:
        ok = trackmate.process()
    except:
        IJ.log("Nothing detected")
        IJ.selectWindow('test')
        IJ.run('Close')
    else:
        selectionModel = SelectionModel(model)
        displayer = HyperStackDisplayer(model, selectionModel, imp)
        displayer.render()
        displayer.refresh()

        # Get spots information
        spots = model.getSpots()
        spotIt = spots.iterator(0, False)

        # Loop through spots and save into files
        # Fetch spot features directly from spot
        sid = []
        x = []
        y = []
        q = []
        r = []
        spotID = 0
        for spot in spotIt:
            spotID = spotID + 1
            sid.append(spotID)
            x.append(spot.getFeature('POSITION_X'))
            y.append(spot.getFeature('POSITION_Y'))
            q.append(spot.getFeature('QUALITY'))
            r.append(spot.getFeature('RADIUS'))
        data = zip(sid, x, y, q, r)
        return data
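# Hedged usage sketch (an addition, not part of the original script): write the
# (id, x, y, quality, radius) rows returned by detection() to a CSV file. The
# output path and channel index are placeholders; this assumes the csv module is
# imported by the surrounding script, as in the other snippets here.
def save_detection_csv(imp, channel, outpath='detections.csv'):
    data = detection(imp, channel)
    if data is None:  # detection() returns None when TrackMate processing failed
        return
    out = open(outpath, 'wb')  # binary mode for the Python 2 / Jython csv writer
    writer = csv.writer(out)
    writer.writerow(['spotID', 'X', 'Y', 'QUALITY', 'RADIUS'])
    for row in data:
        writer.writerow(row)
    out.close()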
def track():
    imp = IJ.getImage()
    nChannels = imp.getNChannels()  # Get the number of channels
    orgtitle = imp.getTitle()

    IJ.run("Subtract Background...", "rolling=50 sliding stack")
    IJ.run("Enhance Contrast...", "saturated=0.3")
    IJ.run("Multiply...", "value=10 stack")
    IJ.run("Subtract Background...", "rolling=50 sliding stack")
    IJ.run("Set Scale...", "distance=0")

    channels = ChannelSplitter.split(imp)
    imp_GFP = channels[0]
    imp_RFP = channels[1]
    IJ.selectWindow(orgtitle)
    IJ.run("Close")

    ic = ImageCalculator()
    imp_merge = ic.run("Add create stack", imp_GFP, imp_RFP)
    imp_merge.setTitle("add_channels")
    imp_merge.show()
    imp_RFP.show()
    imp_GFP.show()

    imp5 = ImagePlus()
    IJ.run(imp5, "Merge Channels...",
           "c1=[" + imp_merge.title + "] c2=" + imp_GFP.title + ' c3=' + imp_RFP.title + " create")
    print("c1=[" + imp_merge.title + "] c2=" + imp_GFP.title + ' c3=' + imp_RFP.title + " create")
    imp5.show()

    imp5 = IJ.getImage()
    nChannels = imp5.getNChannels()

    # Setup settings for TrackMate
    settings = Settings()
    settings.setFrom(imp5)

    # Spot analyzer: we want the multi-C intensity analyzer.
    settings.addSpotAnalyzerFactory(SpotMultiChannelIntensityAnalyzerFactory())

    # Spot detector.
    settings.detectorFactory = LogDetectorFactory()
    settings.detectorSettings = settings.detectorFactory.getDefaultSettings()
    settings.detectorSettings['TARGET_CHANNEL'] = 1
    settings.detectorSettings['RADIUS'] = 24.0
    settings.detectorSettings['THRESHOLD'] = 0.0

    # Spot tracker.
    # Configure tracker - We don't want to allow merges or splits
    settings.trackerFactory = SparseLAPTrackerFactory()
    settings.trackerSettings = LAPUtils.getDefaultLAPSettingsMap()  # almost good enough
    settings.trackerSettings['ALLOW_TRACK_SPLITTING'] = False
    settings.trackerSettings['ALLOW_TRACK_MERGING'] = False
    settings.trackerSettings['LINKING_MAX_DISTANCE'] = 8.0
    settings.trackerSettings['GAP_CLOSING_MAX_DISTANCE'] = 8.0
    settings.trackerSettings['MAX_FRAME_GAP'] = 1

    # Configure track filters
    settings.addTrackAnalyzer(TrackDurationAnalyzer())
    settings.addTrackAnalyzer(TrackSpotQualityFeatureAnalyzer())
    filter1 = FeatureFilter('TRACK_DURATION', 20, True)
    settings.addTrackFilter(filter1)

    # Run TrackMate and store data into Model.
    model = Model()
    trackmate = TrackMate(model, settings)

    ok = trackmate.checkInput()
    if not ok:
        sys.exit(str(trackmate.getErrorMessage()))

    ok = trackmate.process()
    if not ok:
        sys.exit(str(trackmate.getErrorMessage()))

    selectionModel = SelectionModel(model)
    displayer = HyperStackDisplayer(model, selectionModel, imp5)
    displayer.render()
    displayer.refresh()

    IJ.log('TrackMate completed successfully.')
    IJ.log('Found %d spots in %d tracks.' % (model.getSpots().getNSpots(True),
                                             model.getTrackModel().nTracks(True)))

    # Print results in the console.
    headerStr = '%10s %10s %10s %10s %10s %10s' % ('Spot_ID', 'Track_ID', 'Frame', 'X', 'Y', 'Z')
    rowStr = '%10d %10d %10d %10.1f %10.1f %10.1f'
    for i in range(nChannels):
        headerStr += (' %10s' % ('C' + str(i + 1)))
        rowStr += (' %10.1f')

    # Open a file to save results
    myfile = open('/home/rickettsia/Desktop/data/Clamydial_Image_Analysis/EMS_BMECBMELVA_20X_01122019/data/'
                  + orgtitle.split('.')[0] + '.csv', 'wb')
    wr = csv.writer(myfile, quoting=csv.QUOTE_ALL)
    wr.writerow(['Spot_ID', 'Track_ID', 'Frame', 'X', 'Y', 'Z', 'Channel_1', 'Channel_2'])

    IJ.log('\n')
    IJ.log(headerStr)
    tm = model.getTrackModel()
    trackIDs = tm.trackIDs(True)
    for trackID in trackIDs:
        spots = tm.trackSpots(trackID)

        # Let's sort them by frame.
        ls = ArrayList(spots)

        for spot in ls:
            values = [spot.ID(), trackID, spot.getFeature('FRAME'),
                      spot.getFeature('POSITION_X'), spot.getFeature('POSITION_Y'),
                      spot.getFeature('POSITION_Z')]
            for i in range(nChannels):
                values.append(spot.getFeature('MEAN_INTENSITY%02d' % (i + 1)))
            IJ.log(rowStr % tuple(values))
            l1 = (values[0], values[1], values[2], values[3], values[4], values[5], values[7], values[8])
            wr.writerow(l1)

    myfile.close()
    IJ.selectWindow("Merged")
    IJ.run("Close")
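# Note (an addition, not in the original): the "sort them by frame" comment in
# track() above is not followed by an actual sort; the ArrayList keeps the
# SpotCollection's iteration order. A minimal sketch of ordering a track's spots
# by frame, using only the standard Spot.getFeature() API already used here:
def spots_sorted_by_frame(spots):
    # Return the track's spots as a plain Python list ordered by their 'FRAME' feature.
    return sorted(spots, key=lambda s: s.getFeature('FRAME'))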
# Accumulators for per-track measurements (filled further down in the original script).
y_lc = []
mn_int = []
inten = []
tr_dur = []
tr_start = []
tr_fin = []
spt_tr = []
spt_widt = []
tr_charact = []
x_tr = []
y_tr = []
spt_all_x = []
spt_all_y = []
tr_identifi = []
tr_fram = []

spt_m = model.getSpots()
#tot_spts = spt_m.getNSpots(True)

# fm is the FeatureModel, obtained earlier in the original script via model.getFeatureModel().
for id in model.getTrackModel().trackIDs(True):
    # Fetch the track feature from the feature model.
    v = fm.getTrackFeature(id, 'TRACK_MEAN_SPEED')
    med_v = fm.getTrackFeature(id, 'TRACK_MEDIAN_SPEED')
    min_v = fm.getTrackFeature(id, 'TRACK_MIN_SPEED')
    max_v = fm.getTrackFeature(id, 'TRACK_MAX_SPEED')
    std_v = fm.getTrackFeature(id, 'TRACK_STD_SPEED')
    q = fm.getTrackFeature(id, 'TRACK_MEAN_QUALITY')
    med_q = fm.getTrackFeature(id, 'TRACK_MEDIAN_QUALITY')
    min_q = fm.getTrackFeature(id, 'TRACK_MIN_QUALITY')
    max_q = fm.getTrackFeature(id, 'TRACK_MAX_QUALITY')
    std_q = fm.getTrackFeature(id, 'TRACK_STD_QUALITY')
    dura = fm.getTrackFeature(id, 'TRACK_DURATION')
settings.trackerFactory = SparseLAPTrackerFactory()
settings.trackerSettings = LAPUtils.getDefaultLAPSettingsMap()
settings.trackerSettings['MAX_FRAME_GAP'] = frameGap
settings.trackerSettings['LINKING_MAX_DISTANCE'] = linkingMax
settings.trackerSettings['GAP_CLOSING_MAX_DISTANCE'] = closingMax

# Run TrackMate and store data into Model.
model = Model()
trackmate = TrackMate(model, settings)

if not trackmate.checkInput() or not trackmate.process():
    IJ.log('Could not execute TrackMate: ' + str(trackmate.getErrorMessage()))
else:
    IJ.log('TrackMate completed successfully.')
    IJ.log('Found %d spots in %d tracks.'
           % (model.getSpots().getNSpots(True), model.getTrackModel().nTracks(True)))

    # Print results in the console.
    headerStr = '%10s %10s %10s %10s %10s %10s' % ('Spot_ID', 'Track_ID', 'Frame', 'X', 'Y', 'Z')
    rowStr = '%10d %10d %10d %10.1f %10.1f %10.1f'
    for i in range(nChannels):
        headerStr += (' %10s' % ('C' + str(i + 1)))
        rowStr += (' %10.1f')

    IJ.log('\n')
    IJ.log(headerStr)
    tm = model.getTrackModel()
    trackIDs = tm.trackIDs(True)
    for trackID in trackIDs:
settings.addSpotAnalyzerFactory(SpotIntensityAnalyzerFactory())
settings.trackerSettings['LINKING_MAX_DISTANCE'] = 1.000
settings.trackerSettings['GAP_CLOSING_MAX_DISTANCE'] = 1.000
settings.trackerSettings['MAX_FRAME_GAP'] = 3

# Running specific TrackMate tasks
trackmate = TrackMate(model, settings)
trackmate.setNumThreads(1)
trackmate.execDetection()
trackmate.execInitialSpotFiltering()
trackmate.computeSpotFeatures(False)
trackmate.execSpotFiltering(False)

# Get spot features
fm = model.getFeatureModel()
all_spots = model.getSpots()
quality = all_spots.collectValues('QUALITY', False)
# 'subtraction' is a factor defined earlier in the original script; it rescales the Otsu threshold.
optimalQuality = fiji.plugin.trackmate.util.TMUtils.otsuThreshold(quality) * subtraction

# 2.
trackmate.setNumThreads(4)
model = Model()
model.setLogger(Logger.IJ_LOGGER)
settings = Settings()
settings.setFrom(imp)
stringOptimalQuality = str(optimalQuality)
model.getLogger().log(stringOptimalQuality)

# 3.
# Configure detector
# Spot tracker.
settings.trackerFactory = SparseLAPTrackerFactory()
settings.trackerSettings = LAPUtils.getDefaultLAPSettingsMap()
settings.trackerSettings['MAX_FRAME_GAP'] = frameGap
settings.trackerSettings['LINKING_MAX_DISTANCE'] = linkingMax
settings.trackerSettings['GAP_CLOSING_MAX_DISTANCE'] = closingMax

# Run TrackMate and store data into Model.
model = Model()
trackmate = TrackMate(model, settings)

if not trackmate.checkInput() or not trackmate.process():
    IJ.log('Could not execute TrackMate: ' + str(trackmate.getErrorMessage()))
else:
    IJ.log('TrackMate completed successfully.')
    IJ.log('Found %d spots in %d tracks.'
           % (model.getSpots().getNSpots(True), model.getTrackModel().nTracks(True)))

    # Print results in the console.
    headerStr = '%10s %10s %10s %10s %10s %10s' % ('Spot_ID', 'Track_ID', 'Frame', 'X', 'Y', 'Z')
    rowStr = '%10d %10d %10d %10.1f %10.1f %10.1f'
    for i in range(nChannels):
        headerStr += (' %10s' % ('C' + str(i + 1)))
        rowStr += (' %10.1f')

    IJ.log('\n')
    IJ.log(headerStr)
    tm = model.getTrackModel()
    trackIDs = tm.trackIDs(True)
    for trackID in trackIDs:
        spots = tm.trackSpots(trackID)
settings.trackerSettings['GAP_CLOSING_MAX_DISTANCE'] = 5.0
settings.trackerSettings['ALLOW_TRACK_SPLITTING'] = False
settings.trackerSettings['ALLOW_TRACK_MERGING'] = False

#-------------------
# Instantiate plugin
#-------------------
trackmate = TrackMate(model, settings)

if not trackmate.checkInput() or not trackmate.process():
    IJ.log('Could not execute TrackMate: ' + str(trackmate.getErrorMessage()))
else:
    IJ.log('TrackMate completed successfully.')
    IJ.log('Found %d spots in %d tracks.' % (model.getSpots().getNSpots(True),
                                             model.getTrackModel().nTracks(True)))

    # Now open the output file & write a header with each field you need. Fields:
    #   Spot_ID, which is unique for each identified object
    #   Track_ID, which is unique for each 'track' - i.e. same object over time
    #   Frame, which specifies the current frame
    #   X-Y-Z, which are the spatial locations of the tracked object at the current frame.
    headerStr = '%10s %10s %10s %10s %10s %10s' % ('Spot_ID', 'Track_ID', 'Frame', 'X', 'Y', 'Z')
    rowStr = '%10d %10d %10d %10.1f %10.1f %10.1f'

    # Finally, we also want the mean intensity of each tracked object in each frame.
    # For this, we get an additional column for each channel in the image.
    for i in range(nChannels):
        headerStr += (' %10s' % ('C' + str(i + 1)))
        rowStr += (' %10.1f')
def processImages(cfg, wellName, wellPath, images):
    firstImage = IJ.openImage(images[0][0][0][0])
    imgWidth = firstImage.getWidth()
    imgHeight = firstImage.getHeight()

    for c in range(0, cfg.getValue(ELMConfig.numChannels)):
        chanName = cfg.getValue(ELMConfig.chanLabel)[c]
        if cfg.getValue(ELMConfig.chanLabel)[c] in cfg.getValue(ELMConfig.chansToSkip):
            continue

        imColorSeq = ImageStack(imgWidth, imgHeight)
        imSeq = ImageStack(imgWidth, imgHeight)
        totalHist = []
        for z in range(0, cfg.getValue(ELMConfig.numZ)):
            for t in range(0, cfg.getValue(ELMConfig.numT)):
                currIP = IJ.openImage(images[c][z][t][0])
                imColorSeq.addSlice(currIP.duplicate().getProcessor())

                currIP = ELMImageUtils.getGrayScaleImage(currIP, c, chanName, cfg)
                imSeq.addSlice(currIP.getProcessor())

                imgStats = currIP.getStatistics()
                currHist = imgStats.getHistogram()
                if not totalHist:
                    for i in range(len(currHist)):
                        totalHist.append(currHist[i])
                else:
                    for i in range(len(currHist)):
                        totalHist[i] += currHist[i]

        if cfg.hasValue(ELMConfig.thresholdFromWholeRange) and cfg.getValue(ELMConfig.thresholdFromWholeRange) == True:
            threshMethod = "Otsu"  # Default works very poorly for this data
            if cfg.hasValue(ELMConfig.thresholdMethod):
                threshMethod = cfg.getValue(ELMConfig.thresholdMethod)

            thresholder = AutoThresholder()
            computedThresh = thresholder.getThreshold(threshMethod, totalHist)
            cfg.setValue(ELMConfig.imageThreshold, computedThresh)
            print("\tComputed threshold from total hist (" + threshMethod + "): " + str(computedThresh))
            print()
        else:
            print("\tUsing threshold computed on individual images!")
            print()
            computedThresh = 0

        chanName = cfg.getValue(ELMConfig.chanLabel)[c]

        imp = ImagePlus()
        imp.setStack(imSeq)
        imp.setDimensions(1, 1, cfg.getValue(ELMConfig.numT))
        imp.setTitle(wellName + ", channel " + str(c))

        impColor = ImagePlus()
        impColor.setStack(imColorSeq)
        impColor.setDimensions(1, 1, cfg.getValue(ELMConfig.numT))
        impColor.setTitle(wellName + ", channel " + str(c) + " (Color)")

        #----------------------------
        # Create the model object now
        #----------------------------
        # Some of the parameters we configure below need to have
        # a reference to the model at creation. So we create an
        # empty model now.
        model = Model()

        # Send all messages to ImageJ log window.
        model.setLogger(Logger.IJ_LOGGER)

        pa_features = [
            "Area", "PercentArea", "Mean", "StdDev", "Mode", "Min", "Max",
            "X", "Y", "XM", "YM", "Perim.", "BX", "BY", "Width", "Height",
            "Major", "Minor", "Angle", "Circ.", "Feret", "IntDen", "Median",
            "Skew", "Kurt", "RawIntDen", "FeretX", "FeretY", "FeretAngle",
            "MinFeret", "AR", "Round", "Solidity"
        ]

        featureNames = {}
        featureShortNames = {}
        featureDimensions = {}
        isInt = {}
        for feature in pa_features:
            featureNames[feature] = feature
            featureShortNames[feature] = feature
            featureDimensions[feature] = Dimension.STRING
            isInt[feature] = False

        model.getFeatureModel().declareSpotFeatures(pa_features, featureNames, featureShortNames,
                                                    featureDimensions, isInt)

        #------------------------
        # Prepare settings object
        #------------------------
        settings = Settings()
        settings.setFrom(imp)

        dbgPath = os.path.join(wellPath, 'debugImages_' + chanName)
        if not os.path.exists(dbgPath):
            os.makedirs(dbgPath)

        if cfg.hasValue(ELMConfig.thresholdMethod):
            threshMethod = cfg.getValue(ELMConfig.thresholdMethod)
        else:
            threshMethod = "Default"

        # Configure detector - We use the Strings for the keys
        settings.detectorFactory = ThresholdDetectorFactory()
        settings.detectorSettings = {
            'THRESHOLD': computedThresh,
            'ABOVE': True,
            'DEBUG_MODE': True,
            'DEBUG_OUTPATH': dbgPath,
            'THRESHOLD_METHOD': threshMethod
        }

        #settings.detectorFactory = LocalThresholdDetectorFactory()
        #settings.detectorSettings = {
        #    'THRESHOLD' : computedThresh,
        #    'DEBUG_MODE' : True,
        #    'DEBUG_OUTPATH' : dbgPath
        #}

        # Configure spot filters - Classical filter on quality
        filter1 = FeatureFilter('QUALITY', 150, True)
        settings.addSpotFilter(filter1)

        # Configure tracker - We want to allow merges and fusions
        settings.trackerFactory = SparseLAPTrackerFactory()
        settings.trackerSettings = LAPUtils.getDefaultLAPSettingsMap()  # almost good enough

        # Linking
        settings.trackerSettings[TrackerKeys.KEY_LINKING_MAX_DISTANCE] = 220.0  # in pixels
        linkFeaturePenalties = HashMap()
        linkFeaturePenalties['Area'] = 1.0
        linkFeaturePenalties['POSITION_X'] = 1.0
        linkFeaturePenalties['POSITION_Y'] = 1.0
        #linkFeaturePenalties['Circ.'] = 1.0
        #linkFeaturePenalties['Mean'] = 1.0
        settings.trackerSettings[TrackerKeys.KEY_LINKING_FEATURE_PENALTIES] = linkFeaturePenalties

        # Gap closing
        settings.trackerSettings[TrackerKeys.KEY_ALLOW_GAP_CLOSING] = True
        settings.trackerSettings[TrackerKeys.KEY_GAP_CLOSING_MAX_FRAME_GAP] = 8
        settings.trackerSettings[TrackerKeys.KEY_GAP_CLOSING_MAX_DISTANCE] = 120.0  # in pixels
        #settings.trackerSettings[TrackerKeys.KEY_GAP_CLOSING_FEATURE_PENALTIES] = new HashMap<>(DEFAULT_GAP_CLOSING_FEATURE_PENALTIES));

        # Track splitting
        settings.trackerSettings[TrackerKeys.KEY_ALLOW_TRACK_SPLITTING] = False
        settings.trackerSettings[TrackerKeys.KEY_SPLITTING_MAX_DISTANCE] = 45.0  # in pixels
        #settings.trackerSettings[TrackerKeys.KEY_SPLITTING_FEATURE_PENALTIES] = new HashMap<>(DEFAULT_SPLITTING_FEATURE_PENALTIES));

        # Track merging
        settings.trackerSettings[TrackerKeys.KEY_ALLOW_TRACK_MERGING] = True
        settings.trackerSettings[TrackerKeys.KEY_MERGING_MAX_DISTANCE] = 45.0  # in pixels
        #settings.trackerSettings[TrackerKeys.KEY_MERGING_FEATURE_PENALTIES] = new HashMap<>(DEFAULT_MERGING_FEATURE_PENALTIES));

        # Others
        settings.trackerSettings[TrackerKeys.KEY_BLOCKING_VALUE] = float("inf")
        settings.trackerSettings[TrackerKeys.KEY_ALTERNATIVE_LINKING_COST_FACTOR] = 1.05
        settings.trackerSettings[TrackerKeys.KEY_CUTOFF_PERCENTILE] = 0.9

        # Configure track analyzers - Later on we want to filter out tracks
        # based on their displacement, so we need to state that we want
        # track displacement to be calculated. By default, out of the GUI,
        # no features are calculated.
        # The displacement feature is provided by the TrackDurationAnalyzer.
        settings.addTrackAnalyzer(TrackDurationAnalyzer())
        settings.addTrackAnalyzer(TrackBranchingAnalyzer())
        settings.addTrackAnalyzer(TrackIndexAnalyzer())
        settings.addTrackAnalyzer(TrackLocationAnalyzer())
        settings.addTrackAnalyzer(TrackSpeedStatisticsAnalyzer())

        settings.addSpotAnalyzerFactory(SpotIntensityAnalyzerFactory())
        settings.addSpotAnalyzerFactory(SpotContrastAndSNRAnalyzerFactory())

        # Configure track filters - We want to get rid of the two immobile spots at
        # the bottom right of the image. Track displacement must be above 10 pixels.
        #filter2 = FeatureFilter('TRACK_DISPLACEMENT', 1, True)
        #settings.addTrackFilter(filter2)

        #print("Spot feature analyzers: " + settings.toStringFeatureAnalyzersInfo())

        #-------------------
        # Instantiate plugin
        #-------------------
        trackmate = TrackMate(model, settings)
        trackmate.setNumThreads(1)

        #--------
        # Process
        #--------
        ok = trackmate.checkInput()
        if not ok:
            sys.exit(str(trackmate.getErrorMessage()))

        print("Processing " + chanName + "...")
        ok = trackmate.process()
        if not ok:
            sys.exit(str(trackmate.getErrorMessage()))

        #----------------
        # Display results
        #----------------
        print("Rendering...")

        # Set spot names based on track IDs
        # This allows track IDs to be displayed in the rendered video
        for tId in model.getTrackModel().trackIDs(True):
            trackSpots = model.getTrackModel().trackSpots(tId)
            for spot in trackSpots:
                spot.setName(str(tId))

        # Determine sub-tracks within a track
        # Since tracks can merge, we want to keep track of which track a spot is
        # in prior to the merge
        spotToSubTrackMap = {}
        spotIt = model.getSpots().iterator(False)
        trackModel = model.getTrackModel()
        subTrackCount = {}
        while spotIt.hasNext():
            spot = spotIt.next()
            spotEdges = trackModel.edgesOf(spot)
            # Find merge points within a track: ignore spots with fewer than 2 edges
            if (len(spotEdges) < 2):
                continue

            # We have a merge if we have multiple incoming edges
            incomingEdges = 0
            edgeIt = spotEdges.iterator()
            ancestorSpots = []
            while edgeIt.hasNext():
                edge = edgeIt.next()
                src = trackModel.getEdgeSource(edge)
                dst = trackModel.getEdgeTarget(edge)
                if dst.ID() == spot.ID():
                    ancestorSpots.append(src)
                    incomingEdges += 1

            # Ignore non-merges
            if incomingEdges < 2:
                continue

            trackId = trackModel.trackIDOf(spot)
            if trackId in subTrackCount:
                subTrackId = subTrackCount[trackId]
            else:
                subTrackId = 1
            for ancestorSpot in ancestorSpots:
                labelSubTrackAncestors(trackModel, spotToSubTrackMap, ancestorSpot, subTrackId, trackId, False)
                subTrackId += 1
            subTrackCount[trackId] = subTrackId

        # Spots after the last merge still need to be labeled
        for tId in trackModel.trackIDs(True):
            trackSpots = trackModel.trackSpots(tId)
            spotIt = trackSpots.iterator()
            lastSpot = None
            while spotIt.hasNext():
                spot = spotIt.next()
                outgoingEdges = 0
                spotEdges = trackModel.edgesOf(spot)
                edgeIt = spotEdges.iterator()
                while edgeIt.hasNext():
                    edge = edgeIt.next()
                    src = trackModel.getEdgeSource(edge)
                    dst = trackModel.getEdgeTarget(edge)
                    if src.ID() == spot.ID():
                        outgoingEdges += 1
                if outgoingEdges == 0 and len(spotEdges) > 0:
                    lastSpot = spot

            if tId in subTrackCount:
                subTrackId = subTrackCount[tId]
            else:
                subTrackId = 1
            if not lastSpot == None:
                labelSubTrackAncestors(trackModel, spotToSubTrackMap, lastSpot, subTrackId, tId, True)

        # Create output file
        trackOut = os.path.join(wellPath, chanName + "_spotToTrackMap.csv")
        trackFile = open(trackOut, 'w')
        # Fetch the track feature from the feature model.
        trackFile.write('Spot Id, Track Sub Id, Track Id, Frame \n')
        for spotId in spotToSubTrackMap:
            trackFile.write(str(spotId) + ', ' + ','.join(spotToSubTrackMap[spotId]) + '\n')
        trackFile.close()

        # Write Edge Set
        trackOut = os.path.join(wellPath, chanName + "_mergeEdgeSet.csv")
        trackFile = open(trackOut, 'w')
        trackFile.write('Track Id, Spot Id, Spot Id \n')
        edgeIt = trackModel.edgeSet().iterator()
        while edgeIt.hasNext():
            edge = edgeIt.next()
            src = trackModel.getEdgeSource(edge)
            dst = trackModel.getEdgeTarget(edge)
            trackId = trackModel.trackIDOf(edge)
            srcSubTrack = spotToSubTrackMap[src.ID()][0]
            dstSubTrack = spotToSubTrackMap[dst.ID()][0]
            if not srcSubTrack == dstSubTrack:
                trackFile.write(str(trackId) + ', ' + str(src.ID()) + ', ' + str(dst.ID()) + '\n')
        trackFile.close()

        selectionModel = SelectionModel(model)
        displayer = HyperStackDisplayer(model, selectionModel, impColor)
        displayer.setDisplaySettings(TrackMateModelView.KEY_TRACK_COLORING,
                                     PerTrackFeatureColorGenerator(model, TrackIndexAnalyzer.TRACK_INDEX))
        displayer.setDisplaySettings(TrackMateModelView.KEY_SPOT_COLORING,
                                     SpotColorGeneratorPerTrackFeature(model, TrackIndexAnalyzer.TRACK_INDEX))
        displayer.setDisplaySettings(TrackMateModelView.KEY_DISPLAY_SPOT_NAMES, True)
        displayer.setDisplaySettings(TrackMateModelView.KEY_TRACK_DISPLAY_MODE,
                                     TrackMateModelView.TRACK_DISPLAY_MODE_LOCAL_BACKWARD_QUICK)
        displayer.setDisplaySettings(TrackMateModelView.KEY_TRACK_DISPLAY_DEPTH, 2)
        displayer.render()
        displayer.refresh()

        trackmate.getSettings().imp = impColor
        coa = CaptureOverlayAction(None)
        coa.execute(trackmate)
        WindowManager.setTempCurrentImage(coa.getCapture())
        IJ.saveAs('avi', os.path.join(wellPath, chanName + "_out.avi"))

        imp.close()
        impColor.close()
        displayer.clear()
        displayer.getImp().hide()
        displayer.getImp().close()
        coa.getCapture().hide()
        coa.getCapture().close()

        # Echo results with the logger we set at start:
        model.getLogger().log(str(model))

        # The feature model, that stores edge and track features.
        fm = model.getFeatureModel()

        # Write output for tracks
        numTracks = model.getTrackModel().trackIDs(True).size()
        print "Writing track data for " + str(numTracks) + " tracks."
        trackDat = {}
        for tId in model.getTrackModel().trackIDs(True):
            track = model.getTrackModel().trackSpots(tId)

            # Ensure track spots dir exists
            trackOut = os.path.join(wellPath, chanName + "_track_spots")
            if not os.path.exists(trackOut):
                os.makedirs(trackOut)
            # Create output file
            trackOut = os.path.join(trackOut, "track_" + str(tId) + ".csv")
            trackFile = open(trackOut, 'w')

            # Write Header
            header = 'Name, ID, Frame, '
            for feature in track.toArray()[0].getFeatures().keySet():
                if feature == 'Frame':
                    continue
                header += feature + ", "
            header = header[0:len(header) - 2]
            header += '\n'
            trackFile.write(header)

            # Write spot data
            avgTotalIntensity = 0
            for spot in track:
                #print spot.echo()
                data = [spot.getName(), str(spot.ID()), str(spot.getFeature('FRAME'))]
                for feature in spot.getFeatures():
                    if feature == 'Frame':
                        continue
                    elif feature == 'TOTAL_INTENSITY':
                        avgTotalIntensity += spot.getFeature(feature)
                    data.append(str(spot.getFeature(feature)))
                trackFile.write(','.join(data) + '\n')
            trackFile.close()
            avgTotalIntensity /= len(track)

            # Write out track stats
            # Make sure dir exists
            trackOut = os.path.join(wellPath, chanName + "_tracks")
            if not os.path.exists(trackOut):
                os.makedirs(trackOut)
            # Create output file
            trackOut = os.path.join(trackOut, "track_" + str(tId) + ".csv")
            trackFile = open(trackOut, 'w')

            # Fetch the track feature from the feature model.
            header = ''
            for featName in fm.getTrackFeatureNames():
                header += featName + ", "
            header = header[0:len(header) - 2]
            header += '\n'
            trackFile.write(header)

            features = ''
            for featName in fm.getTrackFeatureNames():
                features += str(fm.getTrackFeature(tId, featName)) + ', '
            features = features[0:len(features) - 2]
            features += '\n'
            trackFile.write(features)
            trackFile.write('\n')
            trackFile.close()

            trackDat[tId] = [
                str(tId),
                str(fm.getTrackFeature(tId, 'TRACK_DURATION')),
                str(avgTotalIntensity),
                str(fm.getTrackFeature(tId, 'TRACK_START')),
                str(fm.getTrackFeature(tId, 'TRACK_STOP'))
            ]

        # Create output file
        trackOut = os.path.join(wellPath, chanName + "_trackSummary.csv")
        trackFile = open(trackOut, 'w')
        # Fetch the track feature from the feature model.
        trackFile.write('Track Id, Duration, Avg Total Intensity, Start Frame, Stop Frame \n')
        for track in trackDat:
            trackFile.write(','.join(trackDat[track]) + '\n')
        trackFile.close()

        trackOut = os.path.join(wellPath, chanName + "_trackModel.xml")
        trackFile = File(trackOut)
        writer = TmXmlWriter(trackFile, model.getLogger())
        #writer.appendLog( logPanel.getTextContent() );
        writer.appendModel(trackmate.getModel())
        writer.appendSettings(trackmate.getSettings())
        #writer.appendGUIState( controller.getGuimodel() );
        writer.writeToFile()

        model.clearSpots(True)
        model.clearTracks(True)

    return trackDat
def nucleus_detection(infile, nucleus_channel, stacksize, animation):
    # Detect nucleus with 3d log filters
    fullpath = infile
    infile = filename(infile)
    IJ.log("Start Segmentation " + str(infile))

    # First get Nb Stacks
    reader = ImageReader()
    omeMeta = MetadataTools.createOMEXMLMetadata()
    reader.setMetadataStore(omeMeta)
    reader.setId(fullpath)
    default_options = "stack_order=XYCZT color_mode=Composite view=Hyperstack specify_range c_begin=" + \
        str(nucleus_channel) + " c_end=" + str(nucleus_channel) + \
        " c_step=1 open=[" + fullpath + "]"
    NbStack = reader.getSizeZ()
    reader.close()

    output = re.sub('.ids', '.csv', infile)
    with open(os.path.join(folder5, output), 'wb') as outfile:
        DETECTwriter = csv.writer(outfile, delimiter=',')
        DETECTwriter.writerow(['spotID', 'roundID', 'X', 'Y', 'Z', 'QUALITY', 'SNR', 'INTENSITY'])

    rounds = NbStack // stacksize
    spotID = 1
    for roundid in xrange(1, rounds + 2):
        # Process stacksize by stacksize otherwise crash because too many spots
        Zstart = (stacksize * roundid - stacksize + 1)
        Zend = (stacksize * roundid)
        if (Zend > NbStack):
            Zend = NbStack % stacksize + (roundid - 1) * stacksize
        IJ.log("Round:" + str(roundid) + ' Zstart=' + str(Zstart) + ' Zend=' + str(Zend) +
               ' out of ' + str(NbStack))
        IJ.run("Bio-Formats Importer",
               default_options + " z_begin=" + str(Zstart) + " z_end=" + str(Zend) + " z_step=1")
        imp = IJ.getImage()
        imp.show()
        cal = imp.getCalibration()

        model = Model()
        settings = Settings()
        settings.setFrom(imp)

        # Configure detector - Manually determined as best
        settings.detectorFactory = LogDetectorFactory()
        settings.detectorSettings = {
            'DO_SUBPIXEL_LOCALIZATION': True,
            'RADIUS': 5.5,
            'TARGET_CHANNEL': 1,
            'THRESHOLD': 50.0,
            'DO_MEDIAN_FILTERING': False,
        }
        filter1 = FeatureFilter('QUALITY', 1, True)
        settings.addSpotFilter(filter1)
        settings.addSpotAnalyzerFactory(SpotIntensityAnalyzerFactory())
        settings.addSpotAnalyzerFactory(SpotContrastAndSNRAnalyzerFactory())
        settings.trackerFactory = SparseLAPTrackerFactory()
        settings.trackerSettings = LAPUtils.getDefaultLAPSettingsMap()

        trackmate = TrackMate(model, settings)
        ok = trackmate.checkInput()
        if not ok:
            sys.exit(str(trackmate.getErrorMessage()))
        try:
            ok = trackmate.process()
        except:
            IJ.log("Nothing detected, Round:" + str(roundid) + ' Zstart=' + str(Zstart) +
                   ' Zend=' + str(Zend) + ' out of ' + str(NbStack))
            IJ.selectWindow(infile)
            IJ.run('Close')
            continue
        else:
            if animation:
                # For plotting purpose only
                imp.setPosition(1, 1, imp.getNFrames())
                imp.getProcessor().setMinAndMax(0, 4000)
                selectionModel = SelectionModel(model)
                displayer = HyperStackDisplayer(model, selectionModel, imp)
                displayer.render()
                displayer.refresh()
                for i in xrange(1, imp.getNSlices() + 1):
                    imp.setSlice(i)
                    time.sleep(0.05)
            IJ.selectWindow(infile)
            IJ.run('Close')

            spots = model.getSpots()
            spotIt = spots.iterator(0, False)
            sid = []
            sroundid = []
            x = []
            y = []
            z = []
            q = []
            snr = []
            intensity = []
            for spot in spotIt:
                sid.append(spotID)
                spotID = spotID + 1
                sroundid.append(roundid)
                x.append(spot.getFeature('POSITION_X'))
                y.append(spot.getFeature('POSITION_Y'))
                q.append(spot.getFeature('QUALITY'))
                snr.append(spot.getFeature('SNR'))
                intensity.append(spot.getFeature('MEAN_INTENSITY'))
                # Correct Z position
                correct_z = spot.getFeature('POSITION_Z') + (roundid - 1) * float(stacksize) * cal.pixelDepth
                z.append(correct_z)

            with open(os.path.join(folder5, output), 'ab') as outfile:
                DETECTwriter = csv.writer(outfile, delimiter=',')
                Sdata = zip(sid, sroundid, x, y, z, q, snr, intensity)
                for Srow in Sdata:
                    DETECTwriter.writerow(Srow)
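# Hedged usage sketch (an addition, not part of the original script): run
# nucleus_detection() on every .ids file in an input folder. 'input_folder' and the
# default stacksize are placeholders; nucleus_detection() itself relies on the
# module-level 'folder5' output directory and the 'filename()' helper defined
# elsewhere in the original script.
def run_nucleus_detection(input_folder, nucleus_channel=1, stacksize=20, animation=False):
    for name in os.listdir(input_folder):
        if name.endswith('.ids'):
            nucleus_detection(os.path.join(input_folder, name), nucleus_channel, stacksize, animation)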