def main(tableName, showPlot):
    image = IJ.getImage()
    roi = image.getRoi()
    if not roi:
        center = image.getWidth() / 2, image.getHeight() / 2
    else:
        center = roi.getXBase(), roi.getYBase()
    table = ResultsTable.getResultsTable(tableName)
    vectors = getVectorsFromTable(table, center)
    radialVelocity = calculateRadialVelocityPerTime(vectors, center)
    radialVelocityAndDistanceByTrack(table, center)
    stats = Tools.getStatistics(radialVelocity)
    median = calculateMedian(radialVelocity)
    rt = ResultsTable.getResultsTable(TABLE_NAME)
    if not rt:
        rt = ResultsTable()
    row = rt.getCounter()
    rt.setValue("label", row, tableName)
    rt.setValue("x", row, center[0])
    rt.setValue("y", row, center[1])
    rt.setValue("mean", row, stats.mean)
    rt.setValue("stdDev", row, stats.stdDev)
    rt.setValue("min", row, stats.min)
    rt.setValue("median", row, median)
    rt.setValue("max", row, stats.max)
    rt.show(TABLE_NAME)
    if showPlot:
        plot(radialVelocity, center)
def calculateRipley(tableName1, tableName2, volume, radiusMax=2, nbSteps=20):
    pointsA = pointList3DFromRT(tableName1)
    pointsB = pointList3DFromRT(tableName2)
    table = ResultsTable()
    step = float(radiusMax) / nbSteps
    idx = 0
    IJ.log("Radius Max = " + str(radiusMax))
    IJ.log("Step = " + str(step))
    nbPoints = len(pointsA + pointsB)
    density = float(nbPoints) / float(volume)
    print(str(density))
    #density = getDensity(pointsA, pointsB)
    #nbPoints = len(pointsA + pointsB)
    for i in range(1, nbSteps + 1):
        radius = i * step
        table.setValue("Radius", idx, radius)
        count = countPointsCloser(pointsA, pointsB, radius)
        table.setValue("Count", idx, count)
        K = count / (density * nbPoints)
        table.setValue("Ripley's K", idx, K)
        # sphere volume (expected K under complete spatial randomness);
        # float literals avoid Python 2 integer division (4/3 truncates to 1)
        expected = (4.0 / 3.0) * math.pi * radius * radius * radius
        table.setValue("Expected Ripley's K", idx, expected)
        table.setValue("Ripley's L", idx, pow(K / math.pi, 1. / 3) - radius)
        idx = idx + 1
    table.show("Ripley's Table")
def calculateCostsOneLevel(self, level=1, previousCandidates=None, bestScore=3.1):
    print("Calculating Costs of Path with depth " + str(level))
    # Create the PathList of Level level
    (pathLists, innateCosts, overlapCosts, leftoverCosts) = self.generatePathLists(level, previousCandidates, bestScore)
    table = ResultsTable()
    minTotalCost = 3
    for pathListIndex, pathList in enumerate(pathLists, start=0):
        totalCost = (innateCosts[pathListIndex] + overlapCosts[pathListIndex] + leftoverCosts[pathListIndex])
        minTotalCost = min(totalCost, minTotalCost)
        for pathIndex, path in enumerate(pathList):
            table.setValue("Path " + str(pathIndex), pathListIndex, "P-" + str(path.getID()))
        table.setValue("Innate Cost", pathListIndex, innateCosts[pathListIndex])
        table.setValue("Overlap Cost", pathListIndex, overlapCosts[pathListIndex])
        table.setValue("Leftover Cost", pathListIndex, leftoverCosts[pathListIndex])
        table.setValue("Total Cost", pathListIndex, totalCost)
    table.show("Costs for Level " + str(level))
    return (pathLists, minTotalCost)
def pixel_collector(rm, channel_imp, channel_name, impname, folder):
    # define new Results table
    rt = ResultsTable()
    IndRois = rm.getIndexes()
    for index in IndRois:
        ROI = rm.getRoi(index)
        ROI_name = ROI.getName()
        coords = ROI.getContainedPoints()
        row = 0
        for pixel in coords:
            x_coord = pixel.getX()
            y_coord = pixel.getY()
            rt.setValue(ROI_name + "_X_pos", row, int(x_coord))
            rt.setValue(ROI_name + "_Y_pos", row, int(y_coord))
            pixel_2 = channel_imp.getProcessor().getPixel(int(x_coord), int(y_coord))
            rt.setValue(ROI_name + "_" + channel_name, row, pixel_2)
            row = row + 1
    rt.show("Results")
    rt.save(os.path.join(folder, impname + '_' + channel_name + "_pixels.csv"))
    print "Pixel collection done!"
def writeCSV(filePath, results, header):
    """ Write a table as a csv file """
    rt = ResultsTable()
    for i in range(len(results[1])):
        rt.incrementCounter()
        for j in range(len(results)):
            rt.addValue(str(header[j]), results[j][i])
    rt.show("Results")
    rt.saveAs(filePath)
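# Usage sketch (not from the original source): writeCSV expects column-major data,
# i.e. results[j] holds every value of the column named header[j]. The file path
# and column names below are hypothetical.
header = ["area", "mean"]
results = [[10.0, 12.5, 9.8],        # values for the "area" column
           [100.2, 98.7, 110.4]]     # values for the "mean" column
writeCSV("/tmp/example_results.csv", results, header)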
def show_as_table(title, data, order=[]):
    """Helper function to display group and data information as a ResultsTable"""
    table = ResultsTable()
    for d in data:
        table.incrementCounter()
        order = [k for k in order]
        order.extend([k for k in d.keys() if k not in order])
        for k in order:
            table.addValue(k, d[k])
    table.show(title)
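# Usage sketch (not from the original source): show_as_table takes an iterable of
# dicts, one per row, plus an optional column order. The data below is hypothetical.
groups = [{"label": "control", "count": 42, "mean": 10.5},
          {"label": "treated", "count": 37, "mean": 13.2}]
show_as_table("Group summary", groups, order=["label", "count", "mean"])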
def showRoiSummary(table):
    res = ResultsTable()
    for i, val in table.items():
        res.setValue('id', i, i + 1)
        valInd = val.keys()
        valInd.remove('Class')
        for ind in valInd:
            res.setValue(ind, i, val[ind])
        res.setValue('Class', i, val['Class'])
    res.show('[ROI Summary]' + imgName)
def showClassSummary(table):
    resClass = dict()
    resTable = ResultsTable()
    for i, d in table.items():
        curClass = d['Class']
        resClass.setdefault(curClass, []).append(i)
    resClassName = sorted(resClass.keys())
    for i, clsName in enumerate(resClassName):
        resTable.setValue('Class', i, clsName)
        resTable.setValue('Counts', i, len(resClass[clsName]))
    resTable.show('[Class Summary]' + imgName)
def reportClustersAsTable(clusters, allPoints, XColumn='X', YColumn='Y', ZColumn='Z', NRColumn='NR'):
    '''
    Report the clustered and unclustered points in the tables 'clusters' and 'unclustered'.
    '''
    rt = ResultsTable()
    counter = 1
    clusterCounter = 1
    clusteredPoints = []
    for c in clusters:
        for dp in c.getPoints():
            rt.incrementCounter()
            p = dp.getPoint()
            rt.addValue(NRColumn, counter)
            rt.addValue(XColumn, p[0])
            rt.addValue(YColumn, p[1])
            rt.addValue(ZColumn, p[2])
            rt.addValue("C", clusterCounter)
            counter = counter + 1
            clusteredPoints.append([p[0], p[1], p[2]])
        clusterCounter = clusterCounter + 1
    rt.show("clusters")
    win = WindowManager.getWindow("Results")
    rt = win.getResultsTable()
    X, Y, Z = getColumns(XColumn, YColumn, ZColumn)
    if not rt.columnExists(NRColumn):
        for i in range(0, len(X)):
            rt.setValue(NRColumn, i, i + 1)
        rt.updateResults()
    NR = getColumn(NRColumn)
    unclusteredPoints = [
        [point.getPoint()[0], point.getPoint()[1], point.getPoint()[2]]
        for point in allPoints
        if [point.getPoint()[0], point.getPoint()[1], point.getPoint()[2]] not in clusteredPoints
    ]
    counter = 1
    rt = ResultsTable()
    for p in unclusteredPoints:
        rt.incrementCounter()
        rt.addValue(NRColumn, counter)
        rt.addValue(XColumn, p[0])
        rt.addValue(YColumn, p[1])
        rt.addValue(ZColumn, p[2])
        counter = counter + 1
    rt.show("unclustered")
    WindowManager.setWindow(win)
def copyMatrixToRt2D(matrix, tableName="Results", sizeX=-1, sizeY=-1, useFirstRowAsHeader=False):
    if sizeX == -1:
        sizeX = len(matrix)
    if sizeY == -1:
        sizeY = len(matrix[0])
    table = ResultsTable()
    for indexX in range(sizeX):
        for indexY in range(sizeY):
            if useFirstRowAsHeader:
                if indexY == 0:
                    continue
                table.setValue(str(matrix[indexX][0]), indexY - 1, matrix[indexX][indexY])
            else:
                table.setValue(indexX, indexY, matrix[indexX][indexY])
    table.show(tableName)
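# Usage sketch (not from the original source): each inner list becomes one table
# column; with useFirstRowAsHeader=True its first element is used as the column
# heading. The values below are hypothetical.
matrix = [["area", 10.0, 12.5],
          ["mean", 100.2, 98.7]]
copyMatrixToRt2D(matrix, tableName="Example", useFirstRowAsHeader=True)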
def writeToTable(mu1, sig1, prior1, mu2, sig2, prior2, threshold):
    rt = RT(2)
    rt.setValue("class", 0, 1)
    rt.setValue("mean", 0, mu1)
    rt.setValue("stddev", 0, sig1)
    rt.setValue("prior", 0, prior1)
    rt.setValue("intersection", 0, threshold)
    rt.setValue("count", 0, 0)
    rt.setValue("class", 1, 2)
    rt.setValue("mean", 1, mu2)
    rt.setValue("stddev", 1, sig2)
    rt.setValue("prior", 1, prior2)
    rt.setValue("intersection", 1, threshold)
    rt.setValue("count", 1, 0)
    rt.show("clusters")
def myResults(results):
    myResultsTable = ResultsTable()
    for idx, graph in enumerate(results.getGraph()):
        for edge in graph.getEdges():
            edgeLength = edge.getLength()
            v1 = edge.getV1()
            v2 = edge.getV2()
            dist = euclideanDistance(v1, v2)
            #print('v1:', type(v1), v1.getPoints())
            # add a row to the results table before filling its columns
            myResultsTable.incrementCounter()
            myResultsTable.addValue('graphID', idx)
            myResultsTable.addValue('length_3d', edgeLength)
            myResultsTable.addValue('dist', dist)
            if dist > 0:
                myResultsTable.addValue('tort', edgeLength / dist)
            else:
                myResultsTable.addValue('tort', 'inf')
    myResultsTable.setPrecision(6)
    myResultsTable.show('samiSkel_results')
def main(tableName, showPlot):
    image = IJ.getImage()
    roi = image.getRoi()
    if not roi:
        center = image.getWidth() / 2, image.getHeight() / 2
    else:
        center = roi.getXBase(), roi.getYBase()
    table = ResultsTable.getResultsTable(tableName)
    rma = RadialMovementAnalyzer(table, center)
    radialDistances = rma.getDeltaRadialDistancePerTrack()
    distances = rma.getDistances()
    frames = rma.getFrames()
    travelledDistances = rma.getTravelledDistances()
    TABLE_NAME = "Distance from " + str(center)
    rt = ResultsTable.getResultsTable(TABLE_NAME)
    if not rt:
        rt = ResultsTable()
    for index, dist in enumerate(radialDistances):
        row = rt.getCounter()
        rt.setValue("label", row, tableName)
        rt.setValue("track ID", row, rma.trackIDs[index])
        rt.setValue("total augmentation of distance from center", row, dist)
        rt.setValue("distance start to end", row, distances[index])
        rt.setValue("travelled distance", row, travelledDistances[index])
        rt.setValue("nr. of frames", row, frames[index])
        if not distances[index] == 0:
            rt.setValue("total augmentation / distance start to end", row, dist / distances[index])
        else:
            rt.setValue("total augmentation / distance start to end", row, float("nan"))
        if not travelledDistances[index] == 0:
            rt.setValue("total augmentation / travelled distance", row, dist / travelledDistances[index])
        else:
            rt.setValue("total augmentation / travelled distance", row, float("nan"))
        rt.setValue("mean speed", row, travelledDistances[index] / frames[index])
        rt.setValue("mean outward speed", row, dist / frames[index])
    rt.show(TABLE_NAME)
    if showPlot:
        plot(distances, radialDistances, center)
def main():
    # Prepare directory tree for output.
    indir = IJ.getDirectory("input directory")
    outdir = IJ.getDirectory(".csv output directory")
    nucdir = os.path.join(outdir, "nuclei")
    bacdir = os.path.join(outdir, "bacteria")
    rufdir = os.path.join(outdir, "ruffles")
    gfpdir = os.path.join(outdir, "gfp")
    channelsdir = os.path.join(outdir, "channels")
    if not os.path.isdir(nucdir):
        os.mkdir(nucdir)
    if not os.path.isdir(bacdir):
        os.mkdir(bacdir)
    if not os.path.isdir(rufdir):
        os.mkdir(rufdir)
    if not os.path.isdir(gfpdir):
        os.mkdir(gfpdir)
    if not os.path.isdir(channelsdir):
        os.mkdir(channelsdir)

    # Collect all file paths in the input directory
    files = readdirfiles(indir)

    nucResults = ResultsTable()
    bacResults = ResultsTable()
    rufResults = ResultsTable()
    gfpResults = ResultsTable()

    for file in files:
        if file.endswith('ome.tif') or file.endswith('ome.tiff'):
            imp = stackprocessor(file,
                                 nChannels=4,
                                 nSlices=7,
                                 nFrames=1)
            channels = ChannelSplitter.split(imp)
            name = imp.getTitle()
            IJ.log("Processing image: {}".format(name))
            for c in range(len(channels)):
                IJ.run(channels[c], "Grays", "")
                IJ.run(channels[c], "Invert", "")
                jpgname = channels[c].getShortTitle()
                jpgoutfile = os.path.join(channelsdir, "{}.jpg".format(jpgname))
                IJ.saveAs(channels[c].flatten(), "Jpeg", jpgoutfile)
                IJ.run(channels[c], "Invert", "")
            nuc = countobjects(channels[0], nucResults,
                               threshMethod="Triangle",
                               subtractBackground=True,
                               # dilate=True,
                               watershed=True,
                               minSize=3.00,
                               maxSize=100,
                               minCirc=0.00,
                               maxCirc=1.00)
            bac = countobjects(channels[1], bacResults,
                               threshMethod="RenyiEntropy",
                               subtractBackground=False,
                               watershed=False,
                               minSize=0.20,
                               maxSize=30.00,
                               minCirc=0.00,
                               maxCirc=1.00)
            ruf = countobjects(channels[2], rufResults,
                               threshMethod="RenyiEntropy",
                               minSize=2.00,
                               maxSize=30.00,
                               minCirc=0.20,
                               maxCirc=1.00)
            gfp = countobjects(channels[3], gfpResults,
                               threshMethod="RenyiEntropy",
                               subtractBackground=False,
                               watershed=True,
                               minSize=0.20,
                               maxSize=30.00,
                               minCirc=0.00,
                               maxCirc=1.00)
            # binaries = [nuc, bac, ruf, gfp]
            # channels[0].show()
            # binaries[0].show()
            # binMontage = RGBStackMerge().mergeChannels(binaries, False)
            # binMontage.show()
            # chsMontage = RGBStackMerge().mergeChannels(channels, False)
            # binMontage = MontageMaker().makeMontage2(binMontage,
            #                                          4,      # int columns
            #                                          4,      # int rows
            #                                          1.00,   # double scale
            #                                          1,      # int first
            #                                          16,     # int last
            #                                          1,      # int inc
            #                                          0,      # int borderWidth
            #                                          False)  # boolean labels
            # chsMontage = MontageMaker().makeMontage2(chsMontage,
            #                                          4,      # int columns
            #                                          4,      # int rows
            #                                          1.00,   # double scale
            #                                          1,      # int first
            #                                          16,     # int last
            #                                          1,      # int inc
            #                                          0,      # int borderWidth
            #                                          False)  # boolean labels
            # binMontage.show()
            # chsMontage.show()
            outfilenuc = os.path.join(nucdir, "threshold_nuc_{}".format(name))
            outfilebac = os.path.join(bacdir, "threshold_bac_{}".format(name))
            outfileruf = os.path.join(rufdir, "threshold_ruf_{}".format(name))
            outfilegfp = os.path.join(gfpdir, "threshold_gfp_{}".format(name))
            IJ.saveAs(nuc.flatten(), "Tiff", outfilenuc)
            IJ.saveAs(bac.flatten(), "Tiff", outfilebac)
            IJ.saveAs(ruf.flatten(), "Tiff", outfileruf)
            IJ.saveAs(gfp.flatten(), "Tiff", outfilegfp)

    nucResults.show("nuclei")
    bacResults.show("bacteria")
    rufResults.show("ruffles")
    gfpResults.show("gfp")

    nucout = os.path.join(outdir, "nuclei.csv")
    bacout = os.path.join(outdir, "bacteria.csv")
    rufout = os.path.join(outdir, "ruffles.csv")
    gfpout = os.path.join(outdir, "gfp.csv")
    ResultsTable.save(nucResults, nucout)
    ResultsTable.save(bacResults, bacout)
    ResultsTable.save(rufResults, rufout)
    ResultsTable.save(gfpResults, gfpout)
roi = rm.getRoi(i)
raw_ch1.setRoi(roi)
stats = raw_ch1.getStatistics()
rm.setSelectedIndexes([i, size - 1])
rm.runCommand(raw_ch1, "AND")
rm.addRoi(raw_ch1.getRoi())
roi = rm.getRoi(rm.getCount() + 1)
stats2 = raw_ch1.getStatistics()
rm.setSelectedIndexes([i, rm.getCount()])
rm.runCommand(raw_ch1, "XOR")
rm.addRoi(raw_ch1.getRoi())
roi = rm.getRoi(rm.getCount() + 1)
stats3 = raw_ch1.getStatistics()
table.addValue("Area full section", stats.area)
table.addValue("Sum of Intensity", stats.area * stats.mean)
table.addValue("Mean Int Pixels", stats.mean)
table.addValue("Area Foreground", stats2.area)
table.addValue("Sum of Intensity Foreground", stats2.area * stats2.mean)
table.addValue("Mean Int Pixels Foreground", stats2.mean)
table.addValue("Area Background", stats3.area)
table.addValue("Sum of Intensity Background", stats3.area * stats3.mean)
table.addValue("Mean Int Pixels Background", stats3.mean)
table.show("Results Analysis")
def updatepressed(event):
    self.__image = IJ.getImage()
    rm = RoiManager.getInstance()
    if (rm == None):
        rm = RoiManager()
    rm.runCommand("reset")
    self.__image.killRoi()
    IJ.run("Threshold...")
    IJ.setAutoThreshold(self.__image, "MaxEntropy")
    rt = ResultsTable()
    pa = ParticleAnalyzer(ParticleAnalyzer.ADD_TO_MANAGER + ParticleAnalyzer.CLEAR_WORKSHEET,
                          Measurements.AREA + Measurements.ELLIPSE + Measurements.MEAN,
                          rt, 0.00, 10000.00, 0.00, 1.00)
    pa.analyze(self.__image)
    self.__roisArray = []
    self.__roisArray = rm.getRoisAsArray()
    #for i in range(rm.getCount()) :
    #    rm.select(i)
    #    rm.runCommand("Set Color", "0000FF", 2)
    IJ.resetThreshold(self.__image)
    rt.show("tempRT")
    areas = rt.getColumn(ResultsTable.AREA)
    means = rt.getColumn(ResultsTable.MEAN)
    majors = rt.getColumn(ResultsTable.MAJOR)
    minors = rt.getColumn(ResultsTable.MINOR)
    #print 0
    if self.__slidersDict["Area_max"].getMaximum() < int(max(areas) + 1):
        # print 1
        self.__slidersDict["Area_max"].setMaximum(int(max(areas)) + 1)
    if self.__slidersDict["Area_min"].getMaximum() < int(max(areas) + 1):
        # print 2
        self.__slidersDict["Area_min"].setMaximum(int(max(areas)) + 1)
    if self.__slidersDict["Mean_max"].getMaximum() < int(max(means) + 1):
        # print 3
        self.__slidersDict["Mean_max"].setMaximum(int(max(means)) + 1)
    if self.__slidersDict["Mean_min"].getMaximum() < int(max(means) + 1):
        # print 4
        self.__slidersDict["Mean_min"].setMaximum(int(max(means)) + 1)
    if self.__slidersDict["Major_max"].getMaximum() < int(max(majors)):
        # print 5
        self.__slidersDict["Major_max"].setMaximum(int(max(majors)) + 1)
    if self.__slidersDict["Major_min"].getMaximum() < int(max(majors) + 1):
        # print 6
        self.__slidersDict["Major_min"].setMaximum(int(max(majors)) + 1)
    if self.__slidersDict["Minor_max"].getMaximum() < int(max(minors) + 1):
        # print 7
        self.__slidersDict["Minor_max"].setMaximum(int(max(minors)) + 1)
    if self.__slidersDict["Minor_min"].getMaximum() < int(max(minors) + 1):
        # print 8
        self.__slidersDict["Minor_min"].setMaximum(int(max(minors)) + 1)
    if self.__slidersDict["AR_max"].getMaximum() < int((max(majors) + 1) / min(minors) + 1):
        # print 9
        self.__slidersDict["AR_max"].setMaximum(int((max(majors) + 1) / (min(minors))))
    if self.__slidersDict["AR_min"].getMaximum() < int((max(majors) + 1) / min(minors)):
        # print 10
        self.__slidersDict["AR_min"].setMaximum(int((max(majors) + 1) / (min(minors))))
    #print 11
    for sb in self.__slidersDict.values():
        sb.repaint()
    #rm.runCommand("reset")
    #temprois = self.getIncludeRois()
    #IJ.run(self.__image, "Remove Overlay", "")
    #o = Overlay()
    #for roi in temprois:
    #    o.addElement(roi)
    #self.__image.killRoi()
    #self.__image.setOverlay(o)
    self.__image.updateAndDraw()
def __fmeasures(self) : self.__Cutoff = float(self.__display4.text) nslices = self.__impRes.getImageStackSize() rt = ResultsTable() rt.show("RT-"+self.__name) if self.__maxfinder : twpoints = TextWindow("points-"+self.__name, "index\tlabel\tname\tx\ty\taxis\tcellw\tcellh", "", 200, 450) twlabels = TextWindow("labels-"+self.__name, "index\tlabel\tname\tnpoints", "", 200, 450) isres = self.__impRes.getImageStack() for index in range(1, nslices+1): pc = (index*100)/nslices IJ.showStatus("Je suis a "+str(pc)+"%") self.__impRes.setSlice(index) self.__impRes.killRoi() roi = self.__listrois[index-1] self.__impRes.setRoi(roi) analyser= Analyzer(self.__impRes, Analyzer.LABELS+Analyzer.CENTER_OF_MASS+Analyzer.CENTROID+Analyzer.INTEGRATED_DENSITY+Analyzer.MEAN+Analyzer.KURTOSIS+Analyzer.SKEWNESS+Analyzer.MIN_MAX+Analyzer.SLICE+Analyzer.STACK_POSITION+Analyzer.STD_DEV, rt) analyser.measure() rt.show("RT-"+self.__name) rect=roi.getBounds() ip = self.__impRes.getProcessor() xCoord = [] yCoord = [] currentPixel = [] m00 = 0.00 m10 = 0.00 m01 = 0.00 mc20 = 0.00 mc02 = 0.00 mc11 = 0.00 mc30 = 0.00 mc03 = 0.00 mc21 = 0.00 mc12 = 0.00 mc40 = 0.00 mc04 = 0.00 mc31 = 0.00 mc13 = 0.00 mm20 = 0.00 mm02 = 0.00 mm11 = 0.00 mm30 = 0.00 mm03 = 0.00 mm21 = 0.00 mm12 = 0.00 mm40 = 0.00 mm04 = 0.00 mm31 = 0.00 mm13 = 0.00 #for y in range(rect.y, rect.y+rect.height, 1) : # for x in range(rect.x, rect.x+rect.width, 1) : # xCoord.append(x+0.5) # yCoord.append(y+0.5) # #pixel=ip.getf(x,y)-self.__Cutoff # pixel = ip.getPixelValue(x,y)-self.__Cutoff # if pixel < 0 : pixel = 0 # currentPixel.append(pixel) # m00 += currentPixel[-1] # m10 += currentPixel[-1]*xCoord[-1] # m01 += currentPixel[-1]*yCoord[-1] #xm = m10/(m00+0.00000001) #ym = m01/(m00+0.00000001) #xc = rect.width/2.00 #yc = rect.height/2.00 #for i in range(rect.width*rect.height) : # xcrel = xCoord[i]-xc # ycrel = yCoord[i]-yc # #mc20 += currentPixel[i]*(xCoord[i]-xc)*(xCoord[i]-xc) # #mc02 += currentPixel[i]*(yCoord[i]-yc)*(yCoord[i]-yc) # #mc11 += currentPixel[i]*(xCoord[i]-xc)*(yCoord[i]-yc) # # # #mc30 += currentPixel[i]*(xCoord[i]-xc)*(xCoord[i]-xc)*(xCoord[i]-xc) # #mc03 += currentPixel[i]*(yCoord[i]-yc)*(yCoord[i]-yc)*(yCoord[i]-yc) # #mc21 += currentPixel[i]*(xCoord[i]-xc)*(xCoord[i]-xc)*(yCoord[i]-yc) # #mc12 += currentPixel[i]*(xCoord[i]-xc)*(yCoord[i]-yc)*(yCoord[i]-yc) # # # #mc40 += currentPixel[i]*(xCoord[i]-xc)*(xCoord[i]-xc)*(xCoord[i]-xc)*(xCoord[i]-xc) # #mc04 += currentPixel[i]*(yCoord[i]-yc)*(yCoord[i]-yc)*(yCoord[i]-yc)*(yCoord[i]-yc) # #mc31 += currentPixel[i]*(xCoord[i]-xc)*(xCoord[i]-xc)*(xCoord[i]-xc)*(yCoord[i]-yc) # #mc13 += currentPixel[i]*(xCoord[i]-xc)*(yCoord[i]-yc)*(yCoord[i]-yc)*(yCoord[i]-yc) # mc20 += currentPixel[i]*xcrel*xcrel # mc02 += currentPixel[i]*ycrel*ycrel # mc11 += currentPixel[i]*xcrel*ycrel # mc30 += currentPixel[i]*xcrel*xcrel*xcrel # mc03 += currentPixel[i]*ycrel*ycrel*ycrel # mc21 += currentPixel[i]*xcrel*xcrel*ycrel # mc12 += currentPixel[i]*xcrel*ycrel*ycrel # mc40 += currentPixel[i]*xcrel*xcrel*xcrel*xcrel # mc04 += currentPixel[i]*ycrel*ycrel*ycrel*ycrel # mc31 += currentPixel[i]*xcrel*xcrel*xcrel*ycrel # mc13 += currentPixel[i]*xcrel*ycrel*ycrel*ycrel #for i in range(rect.width*rect.height) : # mm20 += currentPixel[i]*(xCoord[i]-xm)*(xCoord[i]-xm) # mm02 += currentPixel[i]*(yCoord[i]-ym)*(yCoord[i]-ym) # mm11 += currentPixel[i]*(xCoord[i]-xm)*(yCoord[i]-ym) # mm30 += currentPixel[i]*(xCoord[i]-xm)*(xCoord[i]-xm)*(xCoord[i]-xm) # mm03 += currentPixel[i]*(yCoord[i]-ym)*(yCoord[i]-ym)*(yCoord[i]-ym) # mm21 += 
currentPixel[i]*(xCoord[i]-xm)*(xCoord[i]-xm)*(yCoord[i]-ym) # mm12 += currentPixel[i]*(xCoord[i]-xm)*(yCoord[i]-ym)*(yCoord[i]-ym) # mm40 += currentPixel[i]*(xCoord[i]-xm)*(xCoord[i]-xm)*(xCoord[i]-xm)*(xCoord[i]-xm) # mm04 += currentPixel[i]*(yCoord[i]-ym)*(yCoord[i]-ym)*(yCoord[i]-ym)*(yCoord[i]-ym) # mm31 += currentPixel[i]*(xCoord[i]-xm)*(xCoord[i]-xm)*(xCoord[i]-xm)*(yCoord[i]-ym) # mm13 += currentPixel[i]*(xCoord[i]-xm)*(yCoord[i]-ym)*(yCoord[i]-ym)*(yCoord[i]-ym) #xxcVar = mc20/m00 #yycVar = mc02/m00 #xycVar = mc11/m00 #xcSkew = mc30/(m00 * math.pow(xxcVar,(3.0/2.0))) #ycSkew = mc03/(m00 * math.pow(yycVar,(3.0/2.0))) #xcKurt = mc40 / (m00 * math.pow(xxcVar,2.0)) - 3.0 #ycKurt = mc04 / (m00 * math.pow(yycVar,2.0)) - 3.0 #ecc = (math.pow((mc20-mc02),2.0)+(4.0*mc11*mc11))/m00 #xxmVar = mm20/m00 #yymVar = mm02/m00 #xymVar = mm11/m00 #xmSkew = mm30/(m00 * math.pow(xxmVar,(3.0/2.0))) #ymSkew = mm03/(m00 * math.pow(yymVar,(3.0/2.0))) #xmKurt = mm40 / (m00 * math.pow(xxmVar,2.0)) - 3.0 #ymKurt = mm04 / (m00 * math.pow(yymVar,2.0)) - 3.0 #ecm = (math.pow((mm20-mm02),2.0)+(4.0*mm11*mm11))/m00 #rt.addValue("xxcVar", xxcVar) #rt.addValue("yycVar", yycVar) #rt.addValue("xycVar", xycVar) #rt.addValue("xcSkew", xcSkew) #rt.addValue("ycSkew", ycSkew) #rt.addValue("xcKurt", xcKurt) #rt.addValue("ycKurt", ycKurt) #rt.addValue("Ecc", ecc) #rt.addValue("xxmVar", xxmVar) #rt.addValue("yymVar", yymVar) #rt.addValue("xymVar", xymVar) #rt.addValue("xmSkew", xmSkew) #rt.addValue("ymSkew", ymSkew) #rt.addValue("xmKurt", xmKurt) #rt.addValue("ymKurt", ymKurt) #rt.addValue("Ecm", ecm) rt.addValue("roiw", rect.width) rt.addValue("roih", rect.height) rt.addValue("cellw", self.__ipw[index-1]) rt.addValue("cellh", self.__iph[index-1]) self.__impRes.killRoi() xCoord[:] = [] yCoord[:] = [] currentPixel[:] = [] points = [] points[:] = [] npointsmax = 0 #lab = self.__labels[index-1] nameroi = self.__dictCells[index][0] lab = self.__dictCells[index][1] if self.__maxfinder : self.__impMax.setSlice(index) ipmax = self.__impMax.getProcessor() for y in range(ipmax.getHeight()) : for x in range(ipmax.getWidth()) : if ipmax.getPixelValue(x,y) > 0 : twpoints.append(str(index)+"\t"+lab+"\t"+nameroi+"\t"+str(x)+"\t"+str(y)+"\t"+str(self.__cellsrois[index-1][0].getLength())+"\t"+str(self.__ipw[index-1])+"\t"+str(self.__iph[index-1])) npointsmax+=1 rt.addValue("npoints", npointsmax) twlabels.append(str(index)+"\t"+lab+"\t"+nameroi+"\t"+str(npointsmax)) rt.show("RT-"+self.__name) rt.show("RT-"+self.__name)
imp_threshold.close()
imp_DoG.close()
ort.incrementCounter()
ort.addValue("Frame", i)
ort.addValue("Spots C1", maximaC1.npoints)
ort.addValue("Spots C2", maximaC2.npoints)
ort.addValue("Area occupied with cells in %", str((100 - 100 / stats_all.area * stats_background.area)))
ort.addValue("Area occupied with cells", str((stats_all.area - stats_background.area)))
ort.addValue("Noise C1", str(noise_C1))
ort.addValue("Noise C2", str(noise_C2))
IJ.run(imp1, "Select None", "")
ort.show("Counted spots")
if automatic_save_results:
    dataname = imp1.getShortTitle()
    filename = dataname + ".csv"
    #files = glob.glob(savepath + "/" + dataname + "*.csv")
    savename = savepath + "/" + filename
    ort.saveAs(savename)
"""
if automatic_save_results:
    dataname = imp1.getShortTitle()
    filename = dataname + "_001.csv"
        continue
    idx = tempvar.index(max(tempvar))
    start = frame[0]
    metaphase = frame[idx + 3]
    if (division - start > 15 and division - start < 100):
        if start > 0:
            for spot in sortedTrack:
                # Fetch spot features directly from spot.
                x = spot.getFeature('POSITION_X')
                y = spot.getFeature('POSITION_Y')
                t = spot.getFeature('FRAME')
                roi2 = OvalRoi(x / dx - (6 * dx), y / dy - (6 * dy), 12, 12)
                roi2.setPosition(int(t))
                rm.add(imp, roi2, nextRoi)
                nextRoi = nextRoi + 1
            resultstable.setValue("IMAGE_NAME", trackrowNumber, filename)
            resultstable.setValue("TRACK_ID", trackrowNumber, id)
            resultstable.setValue("START", trackrowNumber, start)
            resultstable.setValue("METAPHASE", trackrowNumber, metaphase)
            resultstable.setValue("END", trackrowNumber, division)
            trackrowNumber = trackrowNumber + 1
            # plot = Plot(str(id), "slice", "mean", frame, var)
            # plot.show()
            # break
# imp.close()
resultstable.show("Results")
Channel4_count = RoiManager.getInstance().getCount()
print Channel4_count
time.sleep(0.5)
rm.runCommand("reset")
time.sleep(0.5)
ort = ResultsTable()
ort.setPrecision(2)
ort.incrementCounter()
ort.addValue("Channel1", Channel1_count)
ort.addValue("Channel2", Channel2_count)
ort.addValue("Channel3", Channel3_count)
ort.addValue("Channel4", Channel4_count)
ort.show("Results")
if automatic_save_results:
    dataname = imp1.getTitle()
    filename = dataname + ".csv"
    #files = glob.glob(savepath + "/" + dataname + "*.csv")
    savename = savepath + "/" + filename
    ort.saveAs(savename)
Channel1.changes = False
Channel1.close()
Channel2.changes = False
Channel2.close()
Channel3.changes = False
Channel3.close()
Channel4.changes = False
def main(imp, options):
    from ij.plugin import ChannelSplitter
    from ij.gui import Roi, PointRoi, PolygonRoi, Overlay, Line
    from java.awt import Color
    from ij import WindowManager
    from ij.measure import ResultsTable
    from ij.text import TextWindow
    active_z = imp.getZ()
    imps = ChannelSplitter.split(imp)
    imp.setZ(active_z)
    roi_int = imp.getRoi()
    comp_imp = Zproj(imps[options["comp_ch"]], "SUM", active_z, options["z_range"])
    comp_imp = mode_subtract(comp_imp, roi_int)
    loci_imp = Zproj(imps[options["loci_ch"]], "SUM", imp.getZ(), options["z_range"])
    loci_imp = mode_subtract(loci_imp, roi_int)
    # Finding the boundaries of compartment and loci
    comp_roi = thresh(sum_prj=comp_imp, thresh=options["comp_T"], roi=roi_int, method="boundary")
    print "ok"
    if (options["loci_method"] == "locus center"):
        loci_roi = thresh(sum_prj=loci_imp, thresh=options["loci_T"], roi=roi_int, method="point")
    elif options["loci_method"] == "locus boundary":
        loci_roi = thresh(sum_prj=loci_imp, thresh=options["loci_T"], roi=roi_int, method="boundary")
    if options["loci_method"] == "locus center":
        dist, xc, yc, xl, yl = get_center_edge_dist(imp, comp_roi, loci_roi)
    elif options["loci_method"] == "locus boundary":
        dist, xc, yc, xl, yl = get_closest_points(imp, comp_roi, loci_roi)
    rt_exist = WindowManager.getWindow("Loci distance to compartment")
    if rt_exist == None or not isinstance(rt_exist, TextWindow):
        table = ResultsTable()
    else:
        table = rt_exist.getTextPanel().getOrCreateResultsTable()
    table.incrementCounter()
    table.addValue("Label", imp.title)
    table.addValue("Distance(micron)", dist)
    if options['measure_feret']:
        feret_roi, loci_feret, loci_area = feret(sum_prj=loci_imp, thresh=options["loci_T"],
                                                 roi=roi_int, pixel_size=imp.getCalibration().pixelWidth)
        table.addValue("Loci feret", loci_feret)
        table.addValue("Loci area", loci_area)
    table.show("Loci distance to compartment")
    ## Adding loci overlay
    ov = imp.getOverlay()
    if ov == None:
        ov = Overlay()
    line = Line(xc, yc, xl, yl)
    line.setStrokeWidth(0.2)
    line.setStrokeColor(Color.PINK)
    ov.add(line)
    if options["loci_method"] == "locus center":
        ov.add(PointRoi(loci_roi["x"], loci_roi["y"]))
    elif options["loci_method"] == "locus boundary":
        ov.add(loci_roi)
    if options['measure_feret']:
        ov.add(feret_roi)
    ov.add(comp_roi)
    imp.setOverlay(ov)
class MandersPlugin(ImageListener, WindowAdapter): def __init__(self): self.imp = None self.preview = None self.createMainWindow() self.cells = None self.files = [] self.results = ResultsTable() ImagePlus.addImageListener(self) self.selectInputDir() self.selectOutputDir() self.pairs = [] self.methods = [] self.processNextFile() def selectInputDir(self): inputDialog = DirectoryChooser("Please select a directory contaning your images") inputDir = inputDialog.getDirectory() for imageFile in os.listdir(inputDir): self.files.append(inputDir + imageFile) def selectOutputDir(self): outputDialog = DirectoryChooser("Please select a directory to save your results") self.outputDir = outputDialog.getDirectory() def closeImage(self): if self.imp is not None: self.imp.close() self.imp = None if self.preview is not None: self.preview.close() self.preview = None def openImage(self, imageFile): try: images = BF.openImagePlus(imageFile) self.imp = images[0] except UnknownFormatException: return None if self.imp.getNChannels() < 2: IJ.error("Bad image format", "Image must contain at lease 2 channels!") return None if not self.pairs or \ not self.methods: self.getOptionsDialog(self.imp) title = self.imp.title self.imp.title = title[:title.rfind('.')] return self.imp def getOptionsDialog(self, imp): thr_methods = ["None", "Default", "Huang", "Intermodes", "IsoData", "Li", "MaxEntropy","Mean", "MinError(I)", "Minimum", "Moments", "Otsu", "Percentile", "RenyiEntropy", "Shanbhag" , "Triangle", "Yen"] gd = GenericDialog("Please select channels to collocalize") for i in range(1, imp.getNChannels() + 1): gd.addChoice("Threshold method for channel %i" % i, thr_methods, "None") gd.showDialog() if gd.wasCanceled(): self.exit() channels = [] for i in range(1, imp.getNChannels() + 1): method = gd.getNextChoice() self.methods.append(method) if method != "None": channels.append(i) for x in channels: for y in channels: if x < y: self.pairs.append((x, y)) def processNextFile(self): if self.files: imageFile = self.files.pop(0) return self.processFile(imageFile) else: return False def processFile(self, imageFile): imp = self.openImage(imageFile) if imp is not None: cell = Cell(imp.NSlices, 1) self.cells = DelegateListModel([]) self.cells.append(cell) self.showMainWindow(self.cells) if self.checkbox3D.isSelected(): self.displayImage(imp) else: self.displayImage(imp, False) self.preview = self.previewImage(imp) self.displayImage(self.preview) return True else: return self.processNextFile() def displayImage(self, imp, show = True): imp.setDisplayMode(IJ.COMPOSITE) enhancer = ContrastEnhancer() enhancer.setUseStackHistogram(True) splitter = ChannelSplitter() for c in range(1, imp.getNChannels() + 1): imp.c = c enhancer.stretchHistogram(imp, 0.35) if show: imp.show() def previewImage(self, imp): roi = imp.getRoi() splitter = ChannelSplitter() channels = [] for c in range(1, imp.getNChannels() + 1): channel = ImagePlus("Channel %i" % c, splitter.getChannel(imp, c)) projector = ZProjector(channel) projector.setMethod(ZProjector.MAX_METHOD) projector.doProjection() channels.append(projector.getProjection()) image = RGBStackMerge.mergeChannels(channels, False) image.title = imp.title + " MAX Intensity" image.luts = imp.luts imp.setRoi(roi) return image def getCroppedChannels(self, imp, cell): splitter = ChannelSplitter() imp.setRoi(None) if cell.mode3D: cropRoi = cell.getCropRoi() else: cropRoi = cell.roi if cropRoi is None: return None crop = cropRoi.getBounds() channels = [] for c in range(1, imp.getNChannels() + 1): slices = 
ImageStack(crop.width, crop.height) channel = splitter.getChannel(imp, c) for z in range(1, channel.getSize() + 1): zslice = channel.getProcessor(z) zslice.setRoi(cropRoi) nslice = zslice.crop() if cell.mode3D: oroi = cell.slices[z - 1].roi else: oroi = cell.roi if oroi is not None: roi = oroi.clone() bounds = roi.getBounds() roi.setLocation(bounds.x - crop.x, bounds.y - crop.y) nslice.setColor(Color.black) nslice.fillOutside(roi) slices.addSlice(nslice) channels.append(ImagePlus("Channel %i" % c, slices)) return channels def getThreshold(self, imp, method): thresholder = Auto_Threshold() duplicator = Duplicator() tmp = duplicator.run(imp) return thresholder.exec(tmp, method, False, False, True, False, False, True) def getContainer(self, impA, impB): imgA = ImagePlusAdapter.wrap(impA) imgB = ImagePlusAdapter.wrap(impB) return DataContainer(imgA, imgB, 1, 1, "imageA", "imageB") def getManders(self, imp, cell): ### Crop channels according to cell mask channels = self.getCroppedChannels(imp, cell) if channels is None: return None ### Calculate channel thresholds thrs = [] thrimps = [] for c, method in enumerate(self.methods): if method != "None": thr, thrimp = self.getThreshold(channels[c], method) else: thr, thrimp = None, None thrs.append(thr) thrimps.append(thrimp) ### Calculate manders colocalization manders = MandersColocalization() raws = [] thrds = [] for chA, chB in self.pairs: container = self.getContainer(channels[chA - 1], channels[chB - 1]) img1 = container.getSourceImage1() img2 = container.getSourceImage2() mask = container.getMask() cursor = TwinCursor(img1.randomAccess(), img2.randomAccess(), Views.iterable(mask).localizingCursor()) rtype = img1.randomAccess().get().createVariable() raw = manders.calculateMandersCorrelation(cursor, rtype) rthr1 = rtype.copy() rthr2 = rtype.copy() rthr1.set(thrs[chA - 1]) rthr2.set(thrs[chB - 1]) cursor.reset() thrd = manders.calculateMandersCorrelation(cursor, rthr1, rthr2, ThresholdMode.Above) raws.append(raw) thrds.append(thrd) return (channels, thrimps, thrs, raws, thrds) def saveMultichannelImage(self, title, channels, luts): tmp = RGBStackMerge.mergeChannels(channels, False) tmp.luts = luts saver = FileSaver(tmp) saver.saveAsTiffStack(self.outputDir + title + ".tif") tmp.close() def createMainWindow(self): self.frame = JFrame('Select cells and ROIs', defaultCloseOperation = JFrame.DISPOSE_ON_CLOSE ) self.frame.setLayout(GridBagLayout()) self.frame.addWindowListener(self) self.frame.add(JLabel("Cells"), GridBagConstraints(0, 0, 1, 1, 0, 0, GridBagConstraints.CENTER, GridBagConstraints.NONE, Insets(5, 2, 2, 0), 0, 0 )) self.cellList = JList(DelegateListModel([]), selectionMode = ListSelectionModel.SINGLE_SELECTION, cellRenderer = MyRenderer(), selectedIndex = 0, valueChanged = self.selectCell ) self.frame.add(JScrollPane(self.cellList), GridBagConstraints(0, 1, 1, 5, .5, 1, GridBagConstraints.CENTER, GridBagConstraints.BOTH, Insets(0, 2, 2, 0), 0, 0 )) self.frame.add(JButton('Add cell', actionPerformed = self.addCell), GridBagConstraints(1, 2, 1, 2, 0, .25, GridBagConstraints.CENTER, GridBagConstraints.NONE, Insets(0, 0, 0, 0), 0, 0 )) self.frame.add(JButton('Remove cell', actionPerformed = self.removeCell), GridBagConstraints(1, 4, 1, 2, 0, .25, GridBagConstraints.CENTER, GridBagConstraints.NONE, Insets(0, 5, 0, 5), 0, 0 )) self.frame.add(JLabel("Slices"), GridBagConstraints(0, 6, 1, 1, 0, 0, GridBagConstraints.CENTER, GridBagConstraints.NONE, Insets(5, 2, 2, 0), 0, 0 )) self.sliceList = JList(DelegateListModel([]), selectionMode = 
ListSelectionModel.SINGLE_SELECTION, cellRenderer = MyRenderer(), selectedIndex = 0, valueChanged = self.selectSlice ) self.frame.add(JScrollPane(self.sliceList), GridBagConstraints(0, 7, 1, 5, .5, 1, GridBagConstraints.CENTER, GridBagConstraints.BOTH, Insets(0, 2, 2, 0), 0, 0 )) self.frame.add(JButton('Update ROI', actionPerformed = self.updateSlice), GridBagConstraints(1, 8, 1, 2, 0, .25, GridBagConstraints.CENTER, GridBagConstraints.NONE, Insets(0, 0, 0, 0), 0, 0 )) self.frame.add(JButton('Done', actionPerformed = self.doneSelecting), GridBagConstraints(1, 10, 1, 2, 0, .25, GridBagConstraints.CENTER, GridBagConstraints.NONE, Insets(0, 0, 0, 0), 0, 0 )) self.checkbox3D = JCheckBox('3D selection mode', True, actionPerformed=self.toggle3D) self.frame.add(self.checkbox3D, GridBagConstraints(0, 13, 2, 1, 0, 1, GridBagConstraints.WEST, GridBagConstraints.NONE, Insets(0, 0, 0, 0), 0, 0 )) def showMainWindow(self, cells = None): if cells is not None: self.cellList.model = cells if cells: self.cellList.selectedIndex = 0 self.frame.pack() self.frame.visible = True def hideMainWindow(self): self.frame.visible = False def closeMainWindow(self): self.frame.dispose() def toggle3D(self, event): mode3D = self.checkbox3D.isSelected() if mode3D: self.sliceList.enabled = True if self.imp is not None: self.imp.show() if self.preview is not None: self.preview.hide() else: self.sliceList.enabled = False if self.preview is None: self.preview = self.previewImage(self.imp) self.displayImage(self.preview) else: self.preview.show() if self.imp is not None: self.imp.hide() selectedCell = self.cellList.selectedIndex if selectedCell >= 0: cell = self.cells[selectedCell] self.sliceList.model = cell.slices self.sliceList.selectedIndex = 0 def addCell(self, event): size = len(self.cells) if (size > 0): last = self.cells[size - 1] n = last.n + 1 else: n = 1 self.cells.append(Cell(self.imp.NSlices, n)) self.cellList.selectedIndex = size def removeCell(self, event): selected = self.cellList.selectedIndex if selected >= 0: self.cells.remove(self.cells[selected]) if (selected >= 1): self.cellList.selectedIndex = selected - 1 else: self.cellList.selectedIndex = 0 def selectCell(self, event): selected = self.cellList.selectedIndex if selected >= 0: cell = self.cells[selected] self.sliceList.model = cell.slices self.sliceList.selectedIndex = 0 else: self.sliceList.model = DelegateListModel([]) if self.preview is not None: self.preview.setRoi(cell.roi) def selectSlice(self, event): selectedCell = self.cellList.selectedIndex selectedSlice = self.sliceList.selectedIndex if selectedCell >= 0 and selectedSlice >= 0: cell = self.cells[selectedCell] image = self.imp mode3D = self.checkbox3D.isSelected() if image is not None and cell is not None and mode3D: roi = cell.slices[selectedSlice].roi if (image.z - 1 != selectedSlice): image.z = selectedSlice + 1 image.setRoi(roi, True) if self.preview is not None and not mode3D: self.preview.setRoi(cell.roi, True) def updateSlice(self, event): if self.checkbox3D.isSelected(): self.updateSlice3D(self.imp) else: self.updateSlice2D(self.preview) def updateSlice3D(self, imp): selectedCell = self.cellList.selectedIndex selectedSlice = self.sliceList.selectedIndex if selectedCell >= 0 and selectedSlice >= 0 and imp is not None: cell = self.cells[selectedCell] impRoi = imp.getRoi() if cell is not None and impRoi is not None: index = selectedSlice + 1 roi = ShapeRoi(impRoi, position = index) cell.mode3D = True cell.name = "Cell %i (3D)" % cell.n cell.slices[selectedSlice].roi = roi if (index + 1 <= 
len(cell.slices)): imp.z = index + 1 self.cellList.repaint(self.cellList.getCellBounds(selectedCell, selectedCell)) self.sliceList.repaint(self.sliceList.getCellBounds(selectedSlice, selectedSlice)) def updateSlice2D(self, imp): selectedCell = self.cellList.selectedIndex if selectedCell >= 0 and imp is not None: cell = self.cells[selectedCell] impRoi = imp.getRoi() if cell is not None and impRoi is not None: roi = ShapeRoi(impRoi, position = 1) cell.mode3D = False cell.name = "Cell %i (2D)" % cell.n cell.roi = roi self.cellList.repaint(self.cellList.getCellBounds(selectedCell, selectedCell)) def imageOpened(self, imp): pass def imageClosed(self, imp): pass def imageUpdated(self, imp): if self.checkbox3D.isSelected(): if imp is not None: selectedCell = self.cellList.selectedIndex selectedSlice = imp.z - 1 if imp == self.imp and selectedSlice != self.sliceList.selectedIndex: self.sliceList.selectedIndex = selectedSlice def doneSelecting(self, event): oluts = self.imp.luts luts = [] channels = [] for c, method in enumerate(self.methods): if method != "None": luts.append(oluts[c]) channels.append(c) for cell in self.cells: manders = self.getManders(self.imp, cell) if manders is not None: chimps, thrimps, thrs, raws, thrds = manders index = self.cells.index(cell) + 1 title = "Cell_%i-" % index + self.imp.title self.saveMultichannelImage(title, chimps, oluts) title = "Cell_%i_thrd-" % index + self.imp.title self.saveMultichannelImage(title, thrimps, luts) self.results.incrementCounter() row = self.results.getCounter() - 1 for i, thr in enumerate(thrs): if thr is not None: self.results.setValue("Threshold %i" % (i + 1), row, int(thr)) for i, pair in enumerate(self.pairs): self.results.setValue("%i-%i M1 raw" % pair, row, float(raws[i].m1)) self.results.setValue("%i-%i M2 raw" % pair, row, float(raws[i].m2)) self.results.setValue("%i-%i M1 thrd" % pair, row, float(thrds[i].m1)) self.results.setValue("%i-%i M2 thrd" % pair, row, float(thrds[i].m2)) self.closeImage() if not self.processNextFile(): print "All done - happy analysis!" self.results.show("Manders collocalization results") self.exit() def windowClosing(self, e): print "Closing plugin - BYE!!!" self.exit() def exit(self): ImagePlus.removeImageListener(self) self.closeImage() self.closeMainWindow()
# Read peak coordinates into an array of integers
peak.localize(p)
roi.addPoint(imp, p[0], p[1])
imp.setRoi(roi)

# Now, iterate each peak, defining a small interval centered at each peak,
# and measure the sum of total pixel intensity,
# and display the results in an ImageJ ResultsTable.
table = ResultsTable()

for peak in peaks:
    # Read peak coordinates into an array of integers
    peak.localize(p)
    # Define limits of the interval around the peak:
    # (sigmaSmaller is half the radius of the embryo)
    minC = [p[i] - sigmaSmaller for i in range(img.numDimensions())]
    maxC = [p[i] + sigmaSmaller for i in range(img.numDimensions())]
    # View the interval around the peak, as a flat iterable (like an array)
    fov = Views.interval(img, minC, maxC)
    # Compute the sum of pixel intensity values of the interval
    # (the t is the Type that mediates access to the pixels, via its get* methods)
    s = sum(t.getInteger() for t in fov)
    # Add to results table
    table.incrementCounter()
    table.addValue("x", p[0])
    table.addValue("y", p[1])
    table.addValue("sum", s)

table.show("Embryo intensities at peaks")
from java.lang import Double

imp = IJ.getImage()

# Create a table to store the results
table = ResultsTable()
# Create a hidden ROI manager, to store a ROI for each blob or cell
roim = RoiManager(True)
# Create a ParticleAnalyzer, with arguments:
# 1. options (could be SHOW_ROI_MASKS, SHOW_OUTLINES, SHOW_MASKS, SHOW_NONE, ADD_TO_MANAGER, and others; combined with bitwise-or)
# 2. measurement options (see [http://rsb.info.nih.gov/ij/developer/api/ij/measure/Measurements.html Measurements])
# 3. a ResultsTable to store the measurements
# 4. The minimum size of a particle to consider for measurement
# 5. The maximum size (idem)
# 6. The minimum circularity of a particle
# 7. The maximum circularity
pa = ParticleAnalyzer(ParticleAnalyzer.ADD_TO_MANAGER,
                      Measurements.AREA + Measurements.CENTER_OF_MASS + Measurements.SHAPE_DESCRIPTORS + Measurements.INTEGRATED_DENSITY,
                      table, 0, Double.POSITIVE_INFINITY, 0.0, 1.0)
pa.setHideOutputImage(True)

if pa.analyze(imp):
    print "All ok"
    table.show("foooo")
else:
    print "There was a problem in analyzing", blobs
# 1. options (could be SHOW_ROI_MASKS, SHOW_OUTLINES, SHOW_MASKS, SHOW_NONE, ADD_TO_MANAGER, and others; combined with bitwise-or)
# 2. measurement options (see [http://imagej.net/developer/api/ij/measure/Measurements.html Measurements])
# 3. a ResultsTable to store the measurements
# 4. The minimum size of a particle to consider for measurement
# 5. The maximum size (idem)
# 6. The minimum circularity of a particle
# 7. The maximum circularity
minSize = 30.0
maxSize = 10000.0
opts = ParticleAnalyzer.EXCLUDE_EDGE_PARTICLES | ParticleAnalyzer.SHOW_OVERLAY_OUTLINES
print(opts)
meas = Measurements.AREA | Measurements.MEAN | Measurements.CENTER_OF_MASS
print(meas)

pa = ParticleAnalyzer(opts, meas, results_table, minSize, maxSize)
# pa.setHideOutputImage(False)
pa.setRoiManager(roim)

if pa.analyze(imp_work):
    imp_out = pa.getOutputImage()
    # imp_out.show()
    roim.runCommand(blobs, "Show All with labels")
    blobs.show()
    results_table.show("Results")
    roim.show()
    print "All ok"
else:
    print "There was a problem in analyzing", blobs

# The measured areas are listed in the first column of the results table, as a float array:
areas = results_table.getColumn(0)
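# Sketch (assumption, not part of the original snippet): measurements can also be
# read back per row by column name once pa.analyze() has filled the table.
# "Area", "XM" and "YM" are the standard ImageJ headings for the AREA and
# CENTER_OF_MASS measurements.
for row in range(results_table.getCounter()):
    area = results_table.getValue("Area", row)
    xm = results_table.getValue("XM", row)
    ym = results_table.getValue("YM", row)
    print "Particle %d: area=%.2f at (%.1f, %.1f)" % (row, area, xm, ym)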
def cellSegmentation(srcDir, dstDir, currentDir, filename, keepDirectories): print "Processing:" # Opening the image print "Open image file", filename imp = IJ.openImage(os.path.join(currentDir, dstDir)) # Put your processing commands here! localinput=srcDir.replace("/", "\\") saveDir = localinput.replace(srcDir, dstDir) string="." dotIndex=filename.find(string) localfile= filename[0:dotIndex] print(localfile) IJ.run("New... ", "name="+f+" type=Table") print(f,"\\Headings:Cell\tarea\tCirc\tAR\tRoundness\tMaximum") IJ.run("Bio-Formats", "open=[" + localinput + os.path.sep + filename +"] autoscale color_mode=Default rois_import=[ROI manager] view=Hyperstack stack_order=XYCZT") IJ.open() idd= WM.getIDList(); imageID= idd[0]; IJ.run("Clear Results") WM.getImage(imageID) IJ.run("Duplicate...", "duplicate channels="+str(x)+"") #Nucleus channel #took away x IJ.run("Z Project...", "projection=[Standard Deviation]");#picture for frame detection IJ.run("8-bit"); IJ.run("Duplicate...", "title=IMAGE");#frame IJ.run("Duplicate...", "title=SUBTRACT");#Background subtraction mask (for frame and watershed) imp=IJ.getImage() pixelWidth=imp.getWidth() pixelWidth=pixelWidth/1647.89 pixelHeight= imp.getHeight() #create subtraction mask, applying constraining maximum (step I) IJ.selectWindow("SUBTRACT") nResults=imp.getStatistics() row = nResults rt_exist = WM.getWindow("Results") if rt_exist==None: rt= ResultsTable() else: rt = rt_exist.getTextPanel().getOrCreateResultsTable() rt.setValue("Max ", 0, row.max) #text file rt.show("Results") u=math.floor(row.mean*3) IJ.run("Max...","value="+str(u)) #constraining maximum of 3-fold mean to reduce effect of extreme values during subtraction #gaussian blurring (step II) IJ.run("Gaussian Blur...", "sigma=100 scaled") #blurring for subtraction mask IJ.selectWindow("IMAGE") pxrollrad = cellradius/pixelWidth; #rolling ball radius in pixels needed (= predefined cell radius[µm]/pixelsize[µm/px]) IJ.run("Subtract Background...", "rolling="+str(pxrollrad)+"") IJ.run("Gaussian Blur...", "sigma=2 scaled") #reduces punctate character of grayscale image ' IM=IJ.selectWindow("IMAGE") SUB=IJ.selectWindow("SUBTRACT") ic().run("SUBTRACT", IM, SUB) #just subtracts two images IJ.selectWindow("IMAGE") #see how to call IJ.run("Duplicate...", "title=AND")#watershed IJ.run("Duplicate...", "title=CHECK")#for checking if maxima exist within selection later #Apply threshold to get binary image of cell borders (step IV) IJ.selectWindow("IMAGE") imp = IJ.getImage() # the current image imp.getProcessor().setThreshold(1, 255, ImageProcessor.NO_LUT_UPDATE) IJ.run("Subtract Background...","...") IJ.run("Convert to Mask", "method=Default background=Dark only black") IJ.run("Fill Holes") #Create watershed line image (step V) IJ.selectWindow("AND") IJ.run("Gaussian Blur...", "sigma=2 scaled") imp=IJ.getImage() pixelWidth=imp.getWidth() pixelWidth=pixelWidth/1647.89 pixelHeight= imp.getHeight() # Saving the image nResults=imp.getStatistics() row = nResults rt.setValue("Max ", 1, row.max) #text file nBins = 256 Hist = HistogramWindow("Histogram",imp,nBins) Table = Hist.getResultsTable() Counts = Table.getColumn(1) #mean gray value of pixels belonging to cells needed (i.e. mean of ONLY non-zero pixel) Sum = 0 #all counts CV = 0 #weighed counts (= counts * intensity) for i in range(0, len(Counts)): #starting with 1 instead of 0. -> 0 intensity values are not considered. 
Sum += Counts[i] CV += Counts[i]*i m = (CV/Sum) m=math.floor(m) l = math.floor(2*m) #Maxima need to be at least twice the intensity of cellular mean intensity IJ.run("Find Maxima...", "noise="+str(l)+" output=[Segmented Particles] exclude") #watershedding #Combine watershed lines and cell frame (step VI) IJ.selectWindow("IMAGE") imp=IJ.getImage() imp.getProcessor().setThreshold(1, 255, ImageProcessor.NO_LUT_UPDATE) IJ.run(imp, "Watershed", "") #useful imp = IJ.getImage() ip = imp.getProcessor() segip = MaximumFinder().findMaxima( ip, 1, ImageProcessor.NO_THRESHOLD, MaximumFinder.SEGMENTED , False, False) segip.invert() segimp = ImagePlus("seg", segip) segimp.show() mergeimp = RGBStackMerge.mergeChannels(array([segimp, None, None, imp, None, None, None], ImagePlus), True) mergeimp.show() pa_exist = WM.getWindow("Results for PA") if pa_exist==None: pa_rt= ResultsTable() else: pa_rt = pa_exist.getTextPanel().getOrCreateResultsTable() ParticleAnalyzer.setResultsTable(pa_rt) IJ.run("Set Measurements...", "area mean perimeter shape decimal=3") IJ.run("Analyze Particles...", "size=" + str(cellradius) + "-Infinity circularity=0.1-1.00 add"); #Cell bodies detected pa_rt.show("Results for PA ") save_all(srcDir, dstDir, filename, localfile, keepDirectories, imageID)
def run(imp, preprocessor_path, postprocessor_path, threshold_method, user_comment): output_parameters = { "image title": "", "preprocessor path": float, "post processor path": float, "thresholding op": float, "use ridge detection": bool, "high contrast": int, "low contrast": int, "line width": int, "minimum line length": int, "mitochondrial footprint": float, "branch length mean": float, "branch length median": float, "branch length stdevp": float, "summed branch lengths mean": float, "summed branch lengths median": float, "summed branch lengths stdevp": float, "network branches mean": float, "network branches median": float, "network branches stdevp": float } output_order = [ "image title", "preprocessor path", "post processor path", "thresholding op", "use ridge detection", "high contrast", "low contrast", "line width", "minimum line length", "mitochondrial footprint", "branch length mean", "branch length median", "branch length stdevp", "summed branch lengths mean", "summed branch lengths median", "summed branch lengths stdevp", "network branches mean", "network branches median", "network branches stdevp" ] # Perform any preprocessing steps... status.showStatus("Preprocessing image...") if preprocessor_path != None: if preprocessor_path.exists(): preprocessor_thread = scripts.run(preprocessor_path, True) preprocessor_thread.get() imp = WindowManager.getCurrentImage() else: pass # Store all of the analysis parameters in the table if preprocessor_path == None: preprocessor_str = "" else: preprocessor_str = preprocessor_path.getCanonicalPath() if postprocessor_path == None: postprocessor_str = "" else: postprocessor_str = preprocessor_path.getCanonicalPath() output_parameters["preprocessor path"] = preprocessor_str output_parameters["post processor path"] = postprocessor_str output_parameters["thresholding op"] = threshold_method output_parameters["use ridge detection"] = str(use_ridge_detection) output_parameters["high contrast"] = rd_max output_parameters["low contrast"] = rd_min output_parameters["line width"] = rd_width output_parameters["minimum line length"] = rd_length # Create and ImgPlus copy of the ImagePlus for thresholding with ops... status.showStatus("Determining threshold level...") imp_title = imp.getTitle() slices = imp.getNSlices() frames = imp.getNFrames() output_parameters["image title"] = imp_title imp_calibration = imp.getCalibration() imp_channel = Duplicator().run(imp, imp.getChannel(), imp.getChannel(), 1, slices, 1, frames) img = ImageJFunctions.wrap(imp_channel) # Determine the threshold value if not manual... binary_img = ops.run("threshold.%s" % threshold_method, img) binary = ImageJFunctions.wrap(binary_img, 'binary') binary.setCalibration(imp_calibration) binary.setDimensions(1, slices, 1) # Get the total_area if binary.getNSlices() == 1: area = binary.getStatistics(Measurements.AREA).area area_fraction = binary.getStatistics( Measurements.AREA_FRACTION).areaFraction output_parameters[ "mitochondrial footprint"] = area * area_fraction / 100.0 else: mito_footprint = 0.0 for slice in range(binary.getNSlices()): binary.setSliceWithoutUpdate(slice) area = binary.getStatistics(Measurements.AREA).area area_fraction = binary.getStatistics( Measurements.AREA_FRACTION).areaFraction mito_footprint += area * area_fraction / 100.0 output_parameters[ "mitochondrial footprint"] = mito_footprint * imp_calibration.pixelDepth # Generate skeleton from masked binary ... 
# Generate ridges first if using Ridge Detection if use_ridge_detection and (imp.getNSlices() == 1): skeleton = ridge_detect(imp, rd_max, rd_min, rd_width, rd_length) else: skeleton = Duplicator().run(binary) IJ.run(skeleton, "Skeletonize (2D/3D)", "") # Analyze the skeleton... status.showStatus("Setting up skeleton analysis...") skel = AnalyzeSkeleton_() skel.setup("", skeleton) status.showStatus("Analyzing skeleton...") skel_result = skel.run() status.showStatus("Computing graph based parameters...") branch_lengths = [] summed_lengths = [] graphs = skel_result.getGraph() for graph in graphs: summed_length = 0.0 edges = graph.getEdges() for edge in edges: length = edge.getLength() branch_lengths.append(length) summed_length += length summed_lengths.append(summed_length) output_parameters["branch length mean"] = eztables.statistical.average( branch_lengths) output_parameters["branch length median"] = eztables.statistical.median( branch_lengths) output_parameters["branch length stdevp"] = eztables.statistical.stdevp( branch_lengths) output_parameters[ "summed branch lengths mean"] = eztables.statistical.average( summed_lengths) output_parameters[ "summed branch lengths median"] = eztables.statistical.median( summed_lengths) output_parameters[ "summed branch lengths stdevp"] = eztables.statistical.stdevp( summed_lengths) branches = list(skel_result.getBranches()) output_parameters["network branches mean"] = eztables.statistical.average( branches) output_parameters["network branches median"] = eztables.statistical.median( branches) output_parameters["network branches stdevp"] = eztables.statistical.stdevp( branches) # Create/append results to a ResultsTable... status.showStatus("Display results...") if "Mito Morphology" in list(WindowManager.getNonImageTitles()): rt = WindowManager.getWindow( "Mito Morphology").getTextPanel().getOrCreateResultsTable() else: rt = ResultsTable() rt.incrementCounter() for key in output_order: rt.addValue(key, str(output_parameters[key])) # Add user comments intelligently if user_comment != None and user_comment != "": if "=" in user_comment: comments = user_comment.split(",") for comment in comments: rt.addValue(comment.split("=")[0], comment.split("=")[1]) else: rt.addValue("Comment", user_comment) rt.show("Mito Morphology") # Create overlays on the original ImagePlus and display them if 2D... 
if imp.getNSlices() == 1: status.showStatus("Generate overlays...") IJ.run(skeleton, "Green", "") IJ.run(binary, "Magenta", "") skeleton_ROI = ImageRoi(0, 0, skeleton.getProcessor()) skeleton_ROI.setZeroTransparent(True) skeleton_ROI.setOpacity(1.0) binary_ROI = ImageRoi(0, 0, binary.getProcessor()) binary_ROI.setZeroTransparent(True) binary_ROI.setOpacity(0.25) overlay = Overlay() overlay.add(binary_ROI) overlay.add(skeleton_ROI) imp.setOverlay(overlay) imp.updateAndDraw() # Generate a 3D model if a stack if imp.getNSlices() > 1: univ = Image3DUniverse() univ.show() pixelWidth = imp_calibration.pixelWidth pixelHeight = imp_calibration.pixelHeight pixelDepth = imp_calibration.pixelDepth # Add end points in yellow end_points = skel_result.getListOfEndPoints() end_point_list = [] for p in end_points: end_point_list.append( Point3f(p.x * pixelWidth, p.y * pixelHeight, p.z * pixelDepth)) univ.addIcospheres(end_point_list, Color3f(255.0, 255.0, 0.0), 2, 1 * pixelDepth, "endpoints") # Add junctions in magenta junctions = skel_result.getListOfJunctionVoxels() junction_list = [] for p in junctions: junction_list.append( Point3f(p.x * pixelWidth, p.y * pixelHeight, p.z * pixelDepth)) univ.addIcospheres(junction_list, Color3f(255.0, 0.0, 255.0), 2, 1 * pixelDepth, "junctions") # Add the lines in green graphs = skel_result.getGraph() for graph in range(len(graphs)): edges = graphs[graph].getEdges() for edge in range(len(edges)): branch_points = [] for p in edges[edge].getSlabs(): branch_points.append( Point3f(p.x * pixelWidth, p.y * pixelHeight, p.z * pixelDepth)) univ.addLineMesh(branch_points, Color3f(0.0, 255.0, 0.0), "branch-%s-%s" % (graph, edge), True) # Add the surface univ.addMesh(binary) univ.getContent("binary").setTransparency(0.5) # Perform any postprocessing steps... status.showStatus("Running postprocessing...") if postprocessor_path != None: if postprocessor_path.exists(): postprocessor_thread = scripts.run(postprocessor_path, True) postprocessor_thread.get() else: pass status.showStatus("Done analysis!")
def run(imp, preprocessor_path, postprocessor_path, threshold_method, user_comment): output_parameters = {"image title" : "", "preprocessor path" : float, "post processor path" : float, "thresholding op" : float, "use ridge detection" : bool, "high contrast" : int, "low contrast" : int, "line width" : int, "minimum line length" : int, "mitochondrial footprint" : float, "branch length mean" : float, "branch length median" : float, "branch length stdevp" : float, "summed branch lengths mean" : float, "summed branch lengths median" : float, "summed branch lengths stdevp" : float, "network branches mean" : float, "network branches median" : float, "network branches stdevp" : float} output_order = ["image title", "preprocessor path", "post processor path", "thresholding op", "use ridge detection", "high contrast", "low contrast", "line width", "minimum line length", "mitochondrial footprint", "branch length mean", "branch length median", "branch length stdevp", "summed branch lengths mean", "summed branch lengths median", "summed branch lengths stdevp", "network branches mean", "network branches median", "network branches stdevp"] # Perform any preprocessing steps... status.showStatus("Preprocessing image...") if preprocessor_path != None: if preprocessor_path.exists(): preprocessor_thread = scripts.run(preprocessor_path, True) preprocessor_thread.get() imp = WindowManager.getCurrentImage() else: pass # Store all of the analysis parameters in the table if preprocessor_path == None: preprocessor_str = "" else: preprocessor_str = preprocessor_path.getCanonicalPath() if postprocessor_path == None: postprocessor_str = "" else: postprocessor_str = preprocessor_path.getCanonicalPath() output_parameters["preprocessor path"] = preprocessor_str output_parameters["post processor path"] = postprocessor_str output_parameters["thresholding op"] = threshold_method output_parameters["use ridge detection"] = str(use_ridge_detection) output_parameters["high contrast"] = rd_max output_parameters["low contrast"] = rd_min output_parameters["line width"] = rd_width output_parameters["minimum line length"] = rd_length # Create and ImgPlus copy of the ImagePlus for thresholding with ops... status.showStatus("Determining threshold level...") imp_title = imp.getTitle() slices = imp.getNSlices() frames = imp.getNFrames() output_parameters["image title"] = imp_title imp_calibration = imp.getCalibration() imp_channel = Duplicator().run(imp, imp.getChannel(), imp.getChannel(), 1, slices, 1, frames) img = ImageJFunctions.wrap(imp_channel) # Determine the threshold value if not manual... binary_img = ops.run("threshold.%s"%threshold_method, img) binary = ImageJFunctions.wrap(binary_img, 'binary') binary.setCalibration(imp_calibration) binary.setDimensions(1, slices, 1) # Get the total_area if binary.getNSlices() == 1: area = binary.getStatistics(Measurements.AREA).area area_fraction = binary.getStatistics(Measurements.AREA_FRACTION).areaFraction output_parameters["mitochondrial footprint"] = area * area_fraction / 100.0 else: mito_footprint = 0.0 for slice in range(binary.getNSlices()): binary.setSliceWithoutUpdate(slice) area = binary.getStatistics(Measurements.AREA).area area_fraction = binary.getStatistics(Measurements.AREA_FRACTION).areaFraction mito_footprint += area * area_fraction / 100.0 output_parameters["mitochondrial footprint"] = mito_footprint * imp_calibration.pixelDepth # Generate skeleton from masked binary ... 
    # Generate ridges first if using Ridge Detection
    if use_ridge_detection and (imp.getNSlices() == 1):
        skeleton = ridge_detect(imp, rd_max, rd_min, rd_width, rd_length)
    else:
        skeleton = Duplicator().run(binary)
        IJ.run(skeleton, "Skeletonize (2D/3D)", "")

    # Analyze the skeleton...
    status.showStatus("Setting up skeleton analysis...")
    skel = AnalyzeSkeleton_()
    skel.setup("", skeleton)
    status.showStatus("Analyzing skeleton...")
    skel_result = skel.run()

    status.showStatus("Computing graph based parameters...")
    branch_lengths = []
    summed_lengths = []
    graphs = skel_result.getGraph()
    for graph in graphs:
        summed_length = 0.0
        edges = graph.getEdges()
        for edge in edges:
            length = edge.getLength()
            branch_lengths.append(length)
            summed_length += length
        summed_lengths.append(summed_length)

    output_parameters["branch length mean"] = eztables.statistical.average(branch_lengths)
    output_parameters["branch length median"] = eztables.statistical.median(branch_lengths)
    output_parameters["branch length stdevp"] = eztables.statistical.stdevp(branch_lengths)

    output_parameters["summed branch lengths mean"] = eztables.statistical.average(summed_lengths)
    output_parameters["summed branch lengths median"] = eztables.statistical.median(summed_lengths)
    output_parameters["summed branch lengths stdevp"] = eztables.statistical.stdevp(summed_lengths)

    branches = list(skel_result.getBranches())
    output_parameters["network branches mean"] = eztables.statistical.average(branches)
    output_parameters["network branches median"] = eztables.statistical.median(branches)
    output_parameters["network branches stdevp"] = eztables.statistical.stdevp(branches)

    # Create/append results to a ResultsTable...
    status.showStatus("Display results...")
    if "Mito Morphology" in list(WindowManager.getNonImageTitles()):
        rt = WindowManager.getWindow("Mito Morphology").getTextPanel().getOrCreateResultsTable()
    else:
        rt = ResultsTable()

    rt.incrementCounter()
    for key in output_order:
        rt.addValue(key, str(output_parameters[key]))

    # Add user comments intelligently
    if user_comment != None and user_comment != "":
        if "=" in user_comment:
            comments = user_comment.split(",")
            for comment in comments:
                rt.addValue(comment.split("=")[0], comment.split("=")[1])
        else:
            rt.addValue("Comment", user_comment)

    rt.show("Mito Morphology")

    # Create overlays on the original ImagePlus and display them if 2D...
    if imp.getNSlices() == 1:
        status.showStatus("Generate overlays...")
        IJ.run(skeleton, "Green", "")
        IJ.run(binary, "Magenta", "")

        skeleton_ROI = ImageRoi(0, 0, skeleton.getProcessor())
        skeleton_ROI.setZeroTransparent(True)
        skeleton_ROI.setOpacity(1.0)
        binary_ROI = ImageRoi(0, 0, binary.getProcessor())
        binary_ROI.setZeroTransparent(True)
        binary_ROI.setOpacity(0.25)

        overlay = Overlay()
        overlay.add(binary_ROI)
        overlay.add(skeleton_ROI)

        imp.setOverlay(overlay)
        imp.updateAndDraw()

    # Generate a 3D model if a stack
    if imp.getNSlices() > 1:
        univ = Image3DUniverse()
        univ.show()

        pixelWidth = imp_calibration.pixelWidth
        pixelHeight = imp_calibration.pixelHeight
        pixelDepth = imp_calibration.pixelDepth

        # Add end points in yellow
        end_points = skel_result.getListOfEndPoints()
        end_point_list = []
        for p in end_points:
            end_point_list.append(Point3f(p.x * pixelWidth, p.y * pixelHeight, p.z * pixelDepth))
        univ.addIcospheres(end_point_list, Color3f(255.0, 255.0, 0.0), 2, 1 * pixelDepth, "endpoints")

        # Add junctions in magenta
        junctions = skel_result.getListOfJunctionVoxels()
        junction_list = []
        for p in junctions:
            junction_list.append(Point3f(p.x * pixelWidth, p.y * pixelHeight, p.z * pixelDepth))
        univ.addIcospheres(junction_list, Color3f(255.0, 0.0, 255.0), 2, 1 * pixelDepth, "junctions")

        # Add the lines in green
        graphs = skel_result.getGraph()
        for graph in range(len(graphs)):
            edges = graphs[graph].getEdges()
            for edge in range(len(edges)):
                branch_points = []
                for p in edges[edge].getSlabs():
                    branch_points.append(Point3f(p.x * pixelWidth, p.y * pixelHeight, p.z * pixelDepth))
                univ.addLineMesh(branch_points, Color3f(0.0, 255.0, 0.0), "branch-%s-%s" % (graph, edge), True)

        # Add the surface
        univ.addMesh(binary)
        univ.getContent("binary").setTransparency(0.5)

    # Perform any postprocessing steps...
    status.showStatus("Running postprocessing...")
    if postprocessor_path != None:
        if postprocessor_path.exists():
            postprocessor_thread = scripts.run(postprocessor_path, True)
            postprocessor_thread.get()
    else:
        pass

    status.showStatus("Done analysis!")
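# Note: run() above refers to names that are not defined in this excerpt
# (status, scripts, ops, use_ridge_detection, rd_max, rd_min, rd_width, rd_length,
# the eztables helper module, and several ImageJ classes). A minimal sketch of the
# SciJava script-parameter header and imports such a script would typically start
# with; the labels and defaults here are assumptions, not part of the original:
#@ ImagePlus imp
#@ File (label="Preprocessor path", required=false) preprocessor_path
#@ File (label="Postprocessor path", required=false) postprocessor_path
#@ String (label="Thresholding op", value="otsu") threshold_method
#@ String (label="Comment", value="") user_comment
#@ Boolean (label="Use ridge detection (2D only)", value=false) use_ridge_detection
#@ Integer (label="High contrast", value=75) rd_max
#@ Integer (label="Low contrast", value=5) rd_min
#@ Integer (label="Line width", value=1) rd_width
#@ Integer (label="Minimum line length", value=1) rd_length
#@ StatusService status
#@ ScriptService scripts
#@ OpService ops

from ij import IJ, WindowManager
from ij.measure import ResultsTable, Measurements
from ij.plugin import Duplicator
from ij.gui import Overlay, ImageRoi
from net.imglib2.img.display.imagej import ImageJFunctions
from sc.fiji.analyzeSkeleton import AnalyzeSkeleton_
from ij3d import Image3DUniverse
from org.scijava.vecmath import Point3f, Color3f  # javax.vecmath in older Fiji installs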
def runScript(): # find table with trajectories rt = findResultsTable(inputTableName) if rt == None: print("Results table window titled [" + inputTableName + "] not found!") return # get input image and its properties img = WindowManager.getCurrentImage() if img == None: print("Could not access input image!") return print("Processing image:", img) xLen = img.getWidth() yLen = img.getHeight() zLen = img.getNSlices() noOfFrames = img.getNFrames() noOfChannels = img.getNChannels() stack = img.getStack() if (noOfChannels > 1): print( "Cannot process images with channels. Convert image to single channel first!" ) return # Start processin data row by row... numOfRows = rt.getCounter() if numOfRows > 1: #create output tableName if showOutputTable: outputTable = ResultsTable() # if output in csv format requested print header if printOutputData: print("trajectory;frame;m0;sizeInPixels;avgIntensity") for idx in range(0, numOfRows): trajectoryId = rt.getValue("Trajectory", idx) x = rt.getValue("x", idx) y = rt.getValue("y", idx) z = rt.getValue("z", idx) frame = rt.getValue("Frame", idx) m0, size, avgInt = getIntensityData(stack, radius, frame, x, y, z, xLen, yLen, zLen) # if output in csv format requested print it if printOutputData: print( str(int(trajectoryId)) + ";" + str(frame) + ";" + str(m0) + ";" + str(size) + ";" + str(avgInt)) # if output table requested update it with data if showOutputTable: outputTable.incrementCounter() outputTable.addValue("", idx + 1) outputTable.addValue("trajectory", int(trajectoryId)) outputTable.addValue("frame", frame) outputTable.addValue("m0", m0) outputTable.addValue("sizeInPixels", size) outputTable.addValue("avgIntensity", avgInt) if showOutputTable: outputTable.show(outputTableName)
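# Note: findResultsTable() and getIntensityData() are defined elsewhere in the
# full script. A minimal sketch of the table lookup, as an assumption about its
# behaviour - return the ResultsTable shown in an open text window with the
# given title, or None if no such window exists:
from ij import WindowManager
from ij.text import TextWindow

def findResultsTable(title):
    win = WindowManager.getWindow(title)
    if win is not None and isinstance(win, TextWindow):
        return win.getTextPanel().getResultsTable()
    return None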
# paOpt = PA.CLEAR_WORKSHEET +\ paOpt = PA.SHOW_OUTLINES + PA.EXCLUDE_EDGE_PARTICLES # +\ # PA.INCLUDE_HOLES #+ \ # PA.SHOW_RESULTS measOpt = PA.AREA + PA.CENTROID + PA.SLICE # + PA.SHAPE_DESCRIPTORS + PA.INTEGRATED_DENSITY rt = ResultsTable() MINSIZE = 2 MAXSIZE = 10000 pa = PA(paOpt, measOpt, rt, MINSIZE, MAXSIZE) pa.setHideOutputImage(True) # pa.processStack = True for i in range(imp.getStackSize()): imp.setSlice(i + 1) pa.analyze(imp) # pa.getOutputImage().show() rt.show("cells") # rt = ResultsTable.open2(path) dotlinker = DotLinker(loadmethod, rt) # better there is a constructor also with linkkost function object. dotlinker.setTrajectoryThreshold(5) dotlinker.setShowTrackTable(False) # dotlinker = DotLinker(loadmethod) linkcostfunction = dotlinker.setLinkCostFunction(lcAD) linkcostfunction.setParameters(5.0, 2.0) rtout = dotlinker.doLinking(False) rtout.show("Tracks") vd = ViewDynamicsArea(imp) vd.plotAreaDynamics(rtout, vd.PLOTALL) imp.show()
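# Note: the block above assumes PA is an alias for ParticleAnalyzer (which also
# inherits the AREA/CENTROID/SLICE constants from the Measurements interface) and
# that imp is the pre-segmented binary stack. A sketch of that assumed setup:
from ij import IJ
from ij.measure import ResultsTable
from ij.plugin.filter import ParticleAnalyzer as PA

imp = IJ.getImage()  # binary stack handed to pa.analyze() above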
rois = load_rois(item['roi']) if len(imps) < ais_chno or len(imps) < nucleus_chno: print 'Image %s has %d channels. Cannot process AIS segmentation for channel %d. Skipping.' % (item['img'], len(imps), ais_chno) else: if show_img: composite.show() #for i in imps: # i.show() results, background = process_image(imps, rois, ais_chno, nucleus_chno, bg_roino=3, average=average, sample_width=ais_linewidth, method=ais_method, threshold=ais_threshold) for roiresult in results: ais_roi = roiresult['ais-roi'] nucleus_roi = roiresult['nucleus-roi'] ais_image = roiresult['ais-image'] overlay.add(ais_roi) overlay.add(nucleus_roi) rt = roiresult['table'] rt_title = '%s-%s-Results' % (composite.getTitle(), roiresult['roi-name']) rt.saveAs(os.path.join(outputdir, '%s.csv' % rt_title)) add_to_summary(summary_rt, composite.getTitle(), roiresult['roi-name'], rt) if show_plot: rt.show(rt_title) roiresult['plot'].show() save_as_tif(outputdir, '%s-%s-AIS-straight' % (composite.getTitle(), roiresult['roi-name']), ais_image) save_roi(outputdir, '%s-%s-AIS-ROI' % (composite.getTitle(), roiresult['roi-name']), ais_roi) save_roi(outputdir, '%s-%s-nucleus-ROI' % (composite.getTitle(), roiresult['roi-name']), nucleus_roi) if show_img: ais_image.show() composite.setOverlay(overlay) summary_rt.show(AIS_SUMMARY_TABLE) print 'Done.\n'
def measure(stack, cells, nuclei):
    time = [ (t-1)*cal.frameInterval for t in range(T+1) ]
    cellValues0 = [ 0.0 for t in range(T+1) ]
    cellValues1 = [ 0.0 for t in range(T+1) ]
    cellAreas0 = [ 0.0 for t in range(T+1) ]
    cellAreas1 = [ 0.0 for t in range(T+1) ]
    nucleusValues0 = [ 0.0 for t in range(T+1) ]
    nucleusValues1 = [ 0.0 for t in range(T+1) ]
    nucleusAreas0 = [ 0.0 for t in range(T+1) ]
    nucleusAreas1 = [ 0.0 for t in range(T+1) ]
    nonNucleusValues0 = [ 0.0 for t in range(T+1) ]
    nonNucleusValues1 = [ 0.0 for t in range(T+1) ]
    for t in range(1,T+1):
        ip = stack.getProcessor(t)
        if cells[t] is None:
            print("No cells found at frame " + str(t))
            continue
        # subtract background Z from all intensity Z measurements
        bothCells = ShapeRoi(cells[t][0]).or(ShapeRoi(cells[t][1]))
        backRoi = ShapeRoi(Rectangle(0,0,imp.getWidth(),imp.getHeight())).not( bothCells )
        ip.setRoi(backRoi)
        backMean = ip.getStatistics().mean

        ip.setRoi( cells[t][0] )
        stats0 = ip.getStatistics()
        cellValues0[t] = stats0.mean - backMean
        cellAreas0[t] = stats0.area * cal.pixelWidth * cal.pixelHeight
        nuc0 = None
        for nuc in nuclei[t]:
            rect = nuc.getBounds()
            nx = int(rect.x+(rect.width/2.0))
            ny = int(rect.y+(rect.height/2.0))
            if cells[t][0].contains(nx,ny):
                nuc0 = nuc
                break
        if nuc0 is not None:
            ip.setRoi( nuc0 )
            nucStats0 = ip.getStatistics()
            nucleusValues0[t] = nucStats0.mean - backMean
            nucleusAreas0[t] = nucStats0.area * cal.pixelWidth * cal.pixelHeight
            nuc0.setPosition(0,0,t)
            nuc0.setStrokeColor(Color.CYAN)
            ol.add(nuc0)
            nonnucRoi0 = ShapeRoi(cells[t][0]).not( ShapeRoi(nuc0) )
            ip.setRoi( nonnucRoi0 )
            nonNucleusValues0[t] = ip.getStatistics().mean - backMean

        ip.setRoi( cells[t][1] )
        stats1 = ip.getStatistics()
        cellValues1[t] = stats1.mean - backMean
        cellAreas1[t] = stats1.area * cal.pixelWidth * cal.pixelHeight
        nuc1 = None
        for nuc in nuclei[t]:
            rect = nuc.getBounds()
            nx = int(rect.x+(rect.width/2.0))
            ny = int(rect.y+(rect.height/2.0))
            if cells[t][1].contains(nx,ny):
                nuc1 = nuc
                break
        if nuc1 is not None:
            ip.setRoi( nuc1 )
            nucStats1 = ip.getStatistics()
            nucleusValues1[t] = nucStats1.mean - backMean
            nucleusAreas1[t] = nucStats1.area * cal.pixelWidth * cal.pixelHeight
            nuc1.setPosition(0,0,t)
            nuc1.setStrokeColor(Color.CYAN)
            ol.add(nuc1)
            nonnucRoi1 = ShapeRoi(cells[t][1]).not( ShapeRoi(nuc1) )
            ip.setRoi( nonnucRoi1 )
            nonNucleusValues1[t] = ip.getStatistics().mean - backMean

    rt = ResultsTable()
    rt.showRowNumbers(False)
    for t in range(1,T+1):
        rt.setValue("Time ("+cal.getTimeUnit()+")", t-1, IJ.d2s(time[t],1))
        areaRatio = cellAreas0[t] / cellAreas1[t] if cellAreas0[t]>0 and cellAreas1[t]>0 else 0.0
        rt.setValue("Cell 0:Cell 1 Area Ratio", t-1, areaRatio)
        nucleusRatio = nucleusValues0[t] / nucleusValues1[t] if nucleusValues0[t]>0 and nucleusValues1[t]>0 else 0.0
        rt.setValue("Cell 0:Cell 1 Nucleus Ratio", t-1, nucleusRatio)
        nonNucleusRatio = nonNucleusValues0[t] / nonNucleusValues1[t] if nonNucleusValues0[t]>0 and nonNucleusValues1[t]>0 else 0.0
        rt.setValue("Cell 0:Cell 1 Non-Nucleus Ratio", t-1, nonNucleusRatio)
        nnnRatio0 = nucleusValues0[t] / nonNucleusValues0[t] if nucleusValues0[t]>0 and nonNucleusValues0[t]>0 else 0.0
        rt.setValue("Cell 0 Nucleus:Non-Nucleus Ratio", t-1, nnnRatio0)
        nnnRatio1 = nucleusValues1[t] / nonNucleusValues1[t] if nucleusValues1[t]>0 and nonNucleusValues1[t]>0 else 0.0
        rt.setValue("Cell 1 Nucleus:Non-Nucleus Ratio", t-1, nnnRatio1)
        rt.setValue("Cell 0 (red) Area ("+cal.getUnit()+u"\u00b2"+")", t-1, cellAreas0[t])
        rt.setValue("Cell 0 Nucleus Area ("+cal.getUnit()+u"\u00b2"+")", t-1, nucleusAreas0[t])
        rt.setValue("Cell 0 All", t-1, cellValues0[t])
        rt.setValue("Cell 0 Nucleus", t-1, nucleusValues0[t])
        rt.setValue("Cell 0 Non-Nucleus", t-1, nonNucleusValues0[t])
        rt.setValue("Cell 1 (green) Area ("+cal.getUnit()+u"\u00b2"+")", t-1, cellAreas1[t])
        rt.setValue("Cell 1 Nucleus Area ("+cal.getUnit()+u"\u00b2"+")", t-1, nucleusAreas1[t])
        rt.setValue("Cell 1 All", t-1, cellValues1[t])
        rt.setValue("Cell 1 Nucleus", t-1, nucleusValues1[t])
        rt.setValue("Cell 1 Non-Nucleus", t-1, nonNucleusValues1[t])
    rt.show(imp.getTitle()+"-Results")

    dataset = DefaultXYDataset()
    dataset.addSeries( "Cell 0", [time[1:], cellValues0[1:]] )
    dataset.addSeries( "Cell 1", [time[1:], cellValues1[1:]] )
    dataset.addSeries( "Nucleus 0", [time[1:], nucleusValues0[1:]] )
    dataset.addSeries( "Nucleus 1", [time[1:], nucleusValues1[1:]] )
    dataset.addSeries( "Non-Nucleus 0", [time[1:], nonNucleusValues0[1:]] )
    dataset.addSeries( "Non-Nucleus 1", [time[1:], nonNucleusValues1[1:]] )

    chart = ChartFactory.createScatterPlot( imp.getTitle(), "Time ("+cal.getTimeUnit()+")", "Intensity Z",
                                            dataset, PlotOrientation.VERTICAL, True, True, False )
    plot = chart.getPlot()
    plot.setBackgroundPaint(Color(64, 128, 255))
    plot.setDomainGridlinePaint(Color.BLACK)
    plot.setRangeGridlinePaint(Color.BLACK)
    renderer = plot.getRenderer()
    legend = LegendItemCollection()
    shapeR = 2.0
    nucShape = Ellipse2D.Float(-shapeR,-shapeR,shapeR*2,shapeR*2)
    nonNucShape = Path2D.Float()
    nonNucShape.moveTo(-shapeR,-shapeR)
    nonNucShape.lineTo(shapeR,shapeR)
    nonNucShape.moveTo(shapeR,-shapeR)
    nonNucShape.lineTo(-shapeR,shapeR)
    for s in range(dataset.getSeriesCount()):
        if s == 0:
            renderer.setSeriesLinesVisible(s, True)
            renderer.setSeriesShapesVisible(s, False)
            renderer.setSeriesStroke(s, BasicStroke(1))
            renderer.setSeriesPaint(s, Color.RED)
            legend.add( LegendItem("Cell 0", Color.RED) )
        elif s == 1:
            renderer.setSeriesLinesVisible(s, True)
            renderer.setSeriesShapesVisible(s, False)
            renderer.setSeriesStroke(s, BasicStroke(1))
            renderer.setSeriesPaint(s, Color.GREEN)
            legend.add( LegendItem("Cell 1", Color.GREEN) )
        elif s == 2:
            renderer.setSeriesLinesVisible(s, False)
            renderer.setSeriesShapesVisible(s, True)
            renderer.setSeriesShape(s, nucShape)
            renderer.setSeriesPaint(s, Color.RED)
        elif s == 3:
            renderer.setSeriesLinesVisible(s, False)
            renderer.setSeriesShapesVisible(s, True)
            renderer.setSeriesShape(s, nucShape)
            renderer.setSeriesPaint(s, Color.GREEN)
        elif s == 4:
            renderer.setSeriesLinesVisible(s, False)
            renderer.setSeriesShapesVisible(s, True)
            renderer.setSeriesShape(s, nonNucShape)
            renderer.setSeriesPaint(s, Color.RED)
        elif s == 5:
            renderer.setSeriesLinesVisible(s, False)
            renderer.setSeriesShapesVisible(s, True)
            renderer.setSeriesShape(s, nonNucShape)
            renderer.setSeriesPaint(s, Color.GREEN)
    plot.setFixedLegendItems(legend)

    frame = ChartFrame(imp.getTitle()+" Z-Normalised Intensity", chart)
    frame.pack()
    frame.setSize( Dimension(800, 800) )
    frame.setLocationRelativeTo(None)
    frame.setVisible(True)
def main(): rt = RT.open2(table_file.getAbsolutePath()) if not rt: return log(" --- --- --- ") log("Loaded %s" % table_file.getAbsolutePath()) log("Loading column lists...") # Get column indices from imported file headings = getColumnHeadings(rt) id_col = getColumnIndex(headings, "TID") t_col = getColumnIndex(headings, "t [") d2p_col = getColumnIndex(headings, "D2P [") angle_col = getColumnIndex(headings, u'\u03B1 [deg]') delta_col = getColumnIndex(headings, u'\u0394\u03B1 [deg]') if angle_col == RT.COLUMN_NOT_FOUND: log("Failed to detect index for angle column. Re-trying...") angle_col = getColumnIndex(headings, u'? [deg]') if delta_col == RT.COLUMN_NOT_FOUND: log("Failed to detect index for delta angle column. Re-trying...") delta_col = getColumnIndex(headings, u'?? [deg]') log("Last column index is %s" % rt.getLastColumn()) if RT.COLUMN_NOT_FOUND in (id_col, d2p_col, delta_col, angle_col): uiservice.showDialog("Error: Some key columns were not found!", "Invalid Table?") return log("Settings: BOUT_WINDOW= %s, MIN_D2P= %s, DEF_FRAME_INTERVAL= %s" % (BOUT_WINDOW, '{0:.4f}'.format(MIN_D2P), DEF_FRAME_INTERVAL)) # Store all data on dedicated lists track_id_rows = rt.getColumnAsDoubles(id_col) d2p_rows = rt.getColumnAsDoubles(d2p_col) angle_rows = rt.getColumnAsDoubles(angle_col) delta_rows = rt.getColumnAsDoubles(delta_col) t_rows = rt.getColumnAsDoubles(t_col) # Assess n of data points and extract unique path ids n_rows = len(track_id_rows) row_indices = range(n_rows) track_ids = set(track_id_rows) n_tracks = len(track_ids) log("Table has %g rows" % n_rows) log("Table has %g tracks" % n_tracks) log("Parsing tracks...") for track_id in track_ids: for row, next_row in zip(row_indices, row_indices[1:]): if track_id_rows[row] != track_id: continue if not isNumber(angle_rows[row]): rt.setValue("FLAG", row, "NA") continue lower_bound = max(0, row - BOUT_WINDOW + 1) upper_bound = min(n_rows-1, row + BOUT_WINDOW) win_d2p = [] for _ in range(lower_bound, upper_bound): win_d2p.append(d2p_rows[row]) if sum(win_d2p) <= MIN_D2P * len(win_d2p): rt.setValue("FLAG", row, 0) else: current_angle = angle_rows[row] next_angle = angle_rows[next_row] current_delta = delta_rows[row] flag = -1 if current_angle < 0 else 1 delta_change = (abs(current_delta) > 90) same_sign = ((current_angle<0) == (next_angle<0)) if delta_change and not same_sign: flag *= -1 rt.setValue("FLAG", row, flag) if next_row == n_rows - 1: rt.setValue("FLAG", next_row, flag) if rt.save(table_file.getAbsolutePath()): log("Processed table successfully saved (file overwritten)") else: log("Could not override input file. Displaying it...") rt.show(table_file.name) log("Creating onset table...") onset_rt = RT() onset_rt.showRowNumbers(False) frame_int = DEF_FRAME_INTERVAL if "table" in frame_rate_detection: frame_int = getFrameIntervalFromTable(row_indices, track_id_rows, t_rows) elif "image" in frame_rate_detection: frame_int = getFrameIntervalFromImage(image_file.getAbsolutePath()) else: log("Using default frame rate") for track_id in track_ids: for prev_row, row in zip(row_indices, row_indices[1:]): if not track_id in (track_id_rows[prev_row], track_id_rows[row]): continue flag = rt.getValue("FLAG", row) if not isNumber(flag): continue flag = int(flag) if flag == 0: continue if flag == 1 or flag == -1: srow = onset_rt.getCounter() onset_rt.incrementCounter() onset_rt.setValue("TID", srow, track_id) from_frame = int(t_rows[prev_row]/frame_int) + 1 to_frame = int(t_rows[row]/frame_int) + 1 onset_rt.setValue("First disp. 
[t]", srow, "%s to %s" % (t_rows[prev_row], t_rows[row])) onset_rt.setValue("First disp. [frames]", srow, "%s to %s" % (from_frame, to_frame)) onset_rt.setValue("ManualTag", srow, "") break out_path = suffixed_path(table_file.getAbsolutePath(), "ManualTagging") if onset_rt.save(out_path): log("Summary table successfully saved: %s" % out_path) else: log("File not saved... Displaying onset table") onset_rt.show("Onsets %s" % table_file.name)
thr1, thrimp1 = calculateThreshold(imp1, roi, methods[0]) thr2, thrimp2 = calculateThreshold(imp2, roi, methods[1]) cursor = TwinCursor(img1.randomAccess(), img2.randomAccess(), Views.iterable(mask).localizingCursor()) rtype = img1.randomAccess().get().createVariable() raw = manders.calculateMandersCorrelation(cursor, rtype) rthr1 = rtype.copy() rthr2 = rtype.copy() rthr1.set(thr1) rthr2.set(thr2) cursor.reset() thrd = manders.calculateMandersCorrelation(cursor, rthr1, rthr2, ThresholdMode.Above) print "Results are: %f %f %f %f" % (raw.m1, raw.m2, thrd.m1, thrd.m2) results.incrementCounter() rowno = results.getCounter() - 1 results.setValue("Cell", rowno, int(rowno)) results.setValue("Threshold 1", rowno, int(thr1)) results.setValue("Threshold 2", rowno, int(thr2)) results.setValue("M1 raw", rowno, float(raw.m1)) results.setValue("M2 raw", rowno, float(raw.m2)) results.setValue("M1 thrd", rowno, float(thrd.m1)) results.setValue("M2 thrd", rowno, float(thrd.m2)) thrimp = RGBStackMerge.mergeChannels([thrimp1, thrimp2], False) saver = FileSaver(thrimp) saver.saveAsTiffStack(outputDir + "Cell_%i-" % results.getCounter() + title + ".tif") thrimp.close() results.show("Colocalization results")
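# Note: calculateThreshold() is not part of this excerpt. A minimal sketch of
# what it is assumed to do - derive an auto-threshold for the ROI using the
# given AutoThresholder method name (e.g. "Otsu") and return the level together
# with a thresholded copy of the image:
from ij.plugin import Duplicator
from ij.process import AutoThresholder

def calculateThreshold(imp, roi, method):
    duplicate = Duplicator().run(imp)
    ip = duplicate.getProcessor()
    if roi is not None:
        ip.setRoi(roi)
    histogram = ip.getStatistics().histogram
    level = AutoThresholder().getThreshold(AutoThresholder.Method.valueOf(method), histogram)
    ip.threshold(level)
    return level, duplicate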
def batch_open_Rois(pathRoi, file_typeRoi=None, name_filterRoi=None, recursive=False):
    '''Open all files in the given folder.
    :param pathRoi: The path from where to open the Rois. String and java.io.File are allowed.
    :param file_typeRoi: Only accept files with the given extension (default: None).
    :param name_filterRoi: Reject files that contain the given string (default: wild characters).
    :param recursive: Process directories recursively (default: False).
    '''
    # Converting a File object to a string.
    if isinstance(pathRoi, File):
        pathRoi = pathRoi.getAbsolutePath()

    def check_type(string):
        '''This function is used to check the file type.
        It is possible to use a single string or a list/tuple of strings as filter.
        This function can access the variables of the surrounding function.
        :param string: The filename to perform the check on.
        '''
        if file_typeRoi:
            # The first branch is used if file_type is a list or a tuple.
            if isinstance(file_typeRoi, (list, tuple)):
                for file_type_ in file_typeRoi:
                    if string.endswith(file_type_):
                        # Exit the function with True.
                        return True
                    else:
                        # Next iteration of the for loop.
                        continue
            # The second branch is used if file_type is a string.
            elif isinstance(file_typeRoi, str):
                if string.endswith(file_typeRoi):
                    return True
                else:
                    return False
            return False
        # Accept all files if file_type is None.
        else:
            return True

    # We collect all files to open in a list.
    path_to_Roi = []
    # Replacing some abbreviations (e.g. $HOME on Linux).
    path = os.path.expanduser(pathRoi)
    # If we don't want a recursive search, we can use os.listdir().
    if not recursive:
        for file_name in os.listdir(pathRoi):
            full_path = os.path.join(pathRoi, file_name)
            if os.path.isfile(full_path):
                if check_type(file_name):
                    path_to_Roi.append(full_path)
    # For a recursive search os.walk() is used.
    else:
        # os.walk() is iterable.
        # Each iteration of the for loop processes a different directory.
        # The first return value represents the current directory.
        # The second return value is a list of included directories.
        # The third return value is a list of included files.
        for directory, dir_names, file_names in os.walk(pathRoi):
            # We are only interested in files.
            for file_name in file_names:
                # The list contains only the file names.
                # The full path needs to be reconstructed.
                full_path = os.path.join(directory, file_name)
                # Both checks are performed to filter the files.
                if check_type(file_name):
                    # Add the file to the list of Rois to open.
                    path_to_Roi.append([full_path, os.path.basename(os.path.splitext(full_path)[0])])

    # Create the list that will be returned by this function.
    RoisX = []
    RoisY = []
    print('path', path_to_Roi)
    for roi_path in path_to_Roi:
        print('path', roi_path)
        # An object equals True and None equals False.
        rm = RoiManager.getInstance()
        if rm == None:
            rm = RoiManager()
        Roi = IJ.open(roi_path)
        roi_points = rm.getRoisAsArray()
        table = ResultsTable()
        for Roi in roi_points:
            xpoints = Roi.getPolygon().xpoints
            ypoints = Roi.getPolygon().ypoints
            for i in range(len(xpoints)):
                table.incrementCounter()
                table.addValue("Index", i)
                table.addValue("X", xpoints[i])
                table.addValue("Y", ypoints[i])
        table.show("XY-Coordinates")
    return roi_points
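# A hedged usage sketch for batch_open_Rois; the folder below is a placeholder
# and the extension filter is an assumption:
roi_folder = "/path/to/roi/folder"  # placeholder
roi_points = batch_open_Rois(roi_folder, file_typeRoi=(".roi", ".zip"), recursive=False)
print("Loaded %d ROIs" % len(roi_points))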
while goRun: wfud = WaitForUserDialog("Pick freehand ROI, then hit OK to analyze") wfud.show() roi = theImage.getRoi() if roi is None: goRun = False else: dataImage.setRoi(roi) subImage = dataImage.duplicate() dataIp = dataImage.getProcessor() dataIp.setRoi(roi) maskIp = dataIp.getMask() maskImage = ImagePlus("Mask Image",maskIp) ic = ImageCalculator() countingImage = ic.run("AND create stack",subImage,maskImage) pixelCount = 0 for i in range(1,countingImage.getNSlices()+1): countingImage.setSlice(i) countingIp = countingImage.getProcessor() for x in range(0,countingImage.getWidth()): for y in range(0,countingImage.getHeight()): if (countingIp.getPixel(x,y) >= intensityThreshold): pixelCount = pixelCount + 1 totAvailablePixels = countingImage.getWidth() * countingImage.getHeight() * countingImage.getNSlices() #IJ.log("Pixel count: " + str(pixelCount) + " of " + str(totAvailablePixels)) countingImage.close() rt.incrementCounter() rt.addValue("PosPixels",pixelCount) rt.addValue("TotPixels",totAvailablePixels) rt.show("DMI Results")
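# Note: the nested x/y loop above visits every pixel; the same count can be read
# from each slice's histogram, which is usually much faster. A sketch of that
# alternative, assuming the same ">= intensityThreshold" criterion:
def count_pixels_at_or_above(imp, threshold):
    count = 0
    for i in range(1, imp.getNSlices() + 1):
        imp.setSlice(i)
        hist = imp.getProcessor().getHistogram()
        count += sum(hist[b] for b in range(int(threshold), len(hist)))
    return count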
print "...ch2 mean ", stats.mean print ".." print "...ch3 TotalInt ", statsch3.area * statsch3.mean print "...ch3 Area ", statsch3.area print "...ch3 mean ", statsch3.mean rt.incrementCounter() rt.setValue("DotID", ct, i) rt.setValue("DotX", ct, yA[i]) rt.setValue("DotY", ct, xA[i]) rt.setValue("DotZ", ct, zA[i]) rt.setValue("Ch2_TotalIntensity", ct, stats.area * stats.mean) rt.setValue("Ch2_MeanIntensity", ct, stats.mean) rt.setValue("Ch3_TotalIntensity", ct, statsch3.area * statsch3.mean) rt.setValue("Ch3_meanIntensity", ct, statsch3.mean) ct += 1 rt.show("Dot Intensity") #AREA, AREA_FRACTION, CENTER_OF_MASS, CENTROID, CIRCULARITY, ELLIPSE, FERET, #INTEGRATED_DENSITY, INVERT_Y, KURTOSIS, LABELS, LIMIT, MAX_STANDARDS, MEAN, #MEDIAN, MIN_MAX, MODE, PERIMETER, RECT, SCIENTIFIC_NOTATION, SHAPE_DESCRIPTORS, #SKEWNESS, SLICE, STACK_POSITION, STD_DEV # preparing merged stack with detected dots. merge = RGBStackMerge() #stacks = Array() #stacks[0] = imp2.getImageStack() #stacks[1] = imp.getImageStack() #imgconv = ImageConverter(imp) #imgconv.setDoScaling(True)
def open_Octopus_file(): # set up a file info structure fi = FileInfo() fi.fileFormat = fi.RAW fi.fileType=FileInfo.GRAY16_UNSIGNED fi.intelByteOrder = True fi.nImages = 1 op = OpenDialog("Choose Octopus .dth file...", "") if not op.getDirectory(): return False # get the file extension file_extension = re.search('(\.[a-z][a-z][a-z])', op.getFileName()).group(1) if file_extension != ".dth": dlg = GenericDialog("Warning") dlg.addMessage("Please select an octopus .dth file") dlg.showDialog() return False # now strip the filename into a stem and index file_parse = re.match('([a-zA-z0-9_]*_)([0-9]+)\.dth', op.getFileName()) file_stem = file_parse.group(1) file_index = int( file_parse.group(2) ) # ok now we need to parse the header info header = get_Octopus_header(op.getDirectory(), file_stem, file_index) fi.nImages = len(header['N']) # check to see whether we have a bit depth, if not, assume 16-bit if 'Bit_Depth' in header: print header['Bit_Depth'] bit_depth = int(header['Bit_Depth'][0]) if bit_depth == 8: fi.fileType = FileInfo.GRAY8 else: bit_depth = 16 # will assume that all files have the same size fi.width = int( header['W'][0] ) fi.height = int( header['H'][0] ) file_timestamp = strftime("%a, %d %b %Y %H:%M:%S", gmtime(float(header['Time'][0])) ) # make a new imagestack to store the data stack = ImageStack(fi.width, fi.height) # finally, we need to make a list of files to import as sometimes we have # non contiguous file numbers try: files = os.listdir(op.getDirectory()) except IOError: raise IOError( "No files exist in directory: " + op.getDirectory()) filenums = [] for f in files: # strip off the stem, and get the number targetfile = re.match(file_stem+'([0-9]+)\.dth', f) # only take thosefiles which match the formatting requirements if targetfile: filenums.append( int(targetfile.group(1)) ) # sort the file numbers sorted_filenums = sorted(filenums) # make a file stats string file_stats_str = file_stem + '\n' + str(fi.width) +'x' + str(fi.height) + 'x' + \ str(len(sorted_filenums)) +' ('+str(bit_depth)+'-bit)\n' + file_timestamp # now open a dialog to let the user set options dlg = GenericDialog("Load Octopus Stream (v"+__version__+")") dlg.addMessage(file_stats_str) dlg.addStringField("Title: ", file_stem) dlg.addNumericField("Start: ", 1, 0); dlg.addNumericField("End: ", len(sorted_filenums), 0) dlg.addCheckbox("Open headers", True) dlg.addCheckbox("Contiguous stream?", False) dlg.addCheckbox("8-bit unsigned", bit_depth==8) dlg.showDialog() # if we cancel the dialog, exit here if dlg.wasCanceled(): return # set some params file_title = dlg.getNextString() file_start = dlg.getNextNumber() file_end = dlg.getNextNumber() DISPLAY_HEADER = bool( dlg.getNextBoolean() ) # check the ranges if file_start > file_end: file_start, file_end = file_end, file_start if file_start < 1: file_start = 1 if file_end > len(sorted_filenums): file_end = len(sorted_filenums) # now set these to the actual file numbers in the stream file_start = sorted_filenums[int(file_start)-1] file_end = sorted_filenums[int(file_end)-1] files_to_open = [n for n in sorted_filenums if n>=file_start and n<=file_end] # if we've got too many, truncate the list if (len(files_to_open) * fi.nImages * fi.width * fi.height) > (MAX_FRAMES_TO_IMPORT*512*512): dlg = GenericDialog("Warning") dlg.addMessage("This may use a lot of memory. 
Continue?") dlg.showDialog() if dlg.wasCanceled(): return False IJ.log( "Opening file: " + op.getDirectory() + op.getFileName() ) IJ.log( file_stats_str + "\nFile range: " + str(files_to_open[0]) + \ "-" + str(files_to_open[-1]) +"\n" ) # make a results table for the metadata # NOTE: horrible looping at the moment, but works if DISPLAY_HEADER: rt = ResultsTable() # ok now we can put the files together into the stack for i in files_to_open: # open the original .dat file and get the stack fi.fileName = get_Octopus_filename( op.getDirectory(), file_stem, i) if os.path.isfile( fi.fileName ): fo = FileOpener(fi) imp = fo.open(False).getStack() # put the slices into the stack for im_slice in xrange( imp.getSize() ): ip = imp.getProcessor( im_slice+1 ) if bit_depth == 8: bi = ip.getBufferedImage() else: bi = ip.get16BitBufferedImage() stack.addSlice( file_title, ip ) if DISPLAY_HEADER: header = get_Octopus_header(op.getDirectory(), file_stem, i) for n in xrange(len(header['N'])): rt.incrementCounter() for k in header.keys(): rt.addValue(k, parse_header( header[k][n] ) ) else: break # done! output = ImagePlus('Octopus ('+file_stem+')', stack) output.show() if DISPLAY_HEADER: rt.show("Octopus header metadata") return True
thresholdImp = clij2.pull(gfx3) labelImp = clij2.pull(gfx5) gfx4 = clij2.push(quantImp) IJ.setMinAndMax(thresholdImp, 0, 1) thresholdImp.setCalibration(cal) thresholdImp.setTitle("Binary mask of " + originalTitle) #add the images to concatenated stacks conThresholdStack = concatStacks(conThresholdStack, thresholdImp) conlabelImpStack = concatStacks(conlabelImpStack, labelImp) table = quantify(gfx4, gfx5, table, nFrame, originalTitle) thresholdImp.close() labelImp.close() IJ.log("Processing timeframe: " + str(nFrame)) table.show("Results of " + originalTitle) #Show the images and make the images pretty... I should have put in a function` conThresholdImp = ImagePlus("Threshold image for " + originalTitle, conThresholdStack) conThresholdImp.setDimensions(1, imp1.getNSlices(), imp1.getNFrames()) IJ.setMinAndMax(conThresholdImp, 0, 1) conThresholdImp.setCalibration(cal) conThresholdImp = CompositeImage(conThresholdImp, CompositeImage.COMPOSITE) conThresholdImp.show() conlabelImp = ImagePlus("Label map " + originalTitle, conlabelImpStack) conlabelImp.setDimensions(1, imp1.getNSlices(), imp1.getNFrames()) conlabelImp.setCalibration(cal) stats = StackStatistics(conlabelImp) conlabelImp = CompositeImage(conlabelImp, CompositeImage.COMPOSITE)
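# Note: concatStacks() is not shown in this excerpt. A minimal sketch of the
# assumed behaviour - append every slice of an ImagePlus to a growing
# ImageStack, creating the stack on first use:
from ij import ImageStack

def concatStacks(stack, imp):
    if stack is None:
        stack = ImageStack(imp.getWidth(), imp.getHeight())
    src = imp.getStack()
    for i in range(1, src.getSize() + 1):
        stack.addSlice(src.getSliceLabel(i), src.getProcessor(i))
    return stack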
print MeanChannel1
print MeanChannel2
print MeanChannel3
print NormChannel1
print NormChannel2
print NormChannel3
print XYCoordinates
print Distance
print Velocity

ort = ResultsTable()
ort.setPrecision(3)
print ort.getCounter()
count = len(MeanChannel1)
for i in range(count):
    ort.incrementCounter()
    ort.addValue("Frame", i)
    ort.addValue("Channel 1", MeanChannel1[i])
    ort.addValue("Channel 2", MeanChannel2[i])
    if threecolour:
        ort.addValue("Channel 3", MeanChannel3[i])
    ort.addValue("NormCh 1", NormChannel1[i])
    ort.addValue("NormCh 2", NormChannel2[i])
    if threecolour:
        ort.addValue("NormCh 3", NormChannel3[i])
    ort.addValue("XY coordinates", str(XYCoordinates[i]))
    ort.addValue("Distance in um", str((Distance[i]*Pixelsize)))
    ort.addValue("Velocity in um/s", str((Velocity[i]*Pixelsize)))
ort.show("Measured intensities")
Dico = { 'Image': hit['ImageName'], 'Slice': i, 'Template': hit['TemplateName'], 'Xcorner': Xcorner, 'Ycorner': Ycorner, 'Xcenter': Xcenter, 'Ycenter': Ycenter, 'Score': hit['Score'] } # column order is defined below if add_roi: # Add ROI index to the result table Dico['Roi Index'] = rm.getCount() AddToTable(Table, Dico, Order=("Image", "Slice", "Template", "Score", "Roi Index", "Xcorner", "Ycorner", "Xcenter", "Ycenter")) else: AddToTable(Table, Dico, Order=("Image", "Slice", "Template", "Score", "Xcorner", "Ycorner", "Xcenter", "Ycenter")) # Display result table if show_table: Table.show("Results")
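# Note: AddToTable() is not defined in this excerpt. A minimal sketch of the
# assumed behaviour - append one row to a ResultsTable, writing the columns in
# the requested order:
def AddToTable(table, dico, Order):
    table.incrementCounter()
    for column in Order:
        table.addValue(column, dico[column])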
imp6.setTitle("Cells filtered by number of spots in Channel 2") #print "Cells filtered by Area: " + str(Filtered_Cells_Area) #print "Cells filtered by length: " + str(Filtered_Cells_Length) #print "Cells filtered by number of spots in Channel 1: " + str(C1_Filtered_Cells_Spots) #print "Cells filtered by number of spots in Channel 2: " + str(C2_Filtered_Cells_Spots) #print "Cells filtered by distance: " + str(Filtered_Cells_SpotDistance) #print allPearson #print average(allPearson) ort.show("Distance map") ort2.show("Point measurements") """ Saving the Results tables: Result tables will be saved to the directory specified in "savepath" """ dataname = imp_orig.getShortTitle() if automatic_save_results: # Gather filenames of the savedirectory filename_ort = dataname + "_Distance_Map_001.csv" savename_ort = savepath + "/" + filename_ort # Generate complete savepath print savename_ort ort.saveAs(savename_ort) # save