def keep_blobs_bigger_than(imp, min_size_pix=100):
    """Keep only blobs with area >= min_size_pix, returning a new binary image.

    Runs a particle analysis on imp and paints each surviving particle white
    (255) onto a fresh black 8-bit image titled "Size_filtered_" + original
    title. imp is closed before returning.

    Fixed: the original docstring was copy-pasted from keep_largest_blob and
    described the wrong behavior; a dead `mx_ind` computation was removed.
    """
    imp.killRoi()
    rt = ResultsTable()
    # Don't stack the prefix if the image title shows it was already filtered.
    if "Size_filtered_" in imp.getTitle():
        title_addition = ""
    else:
        title_addition = "Size_filtered_"
    out_imp = IJ.createImage("{}{}".format(title_addition, imp.getTitle()),
                             imp.getWidth(), imp.getHeight(), 1, 8)
    out_imp.show()
    # Start from an all-black canvas.
    IJ.run(out_imp, "Select All", "")
    IJ.run(out_imp, "Set...", "value=0 slice")
    mxsz = imp.width * imp.height
    roim = RoiManager()
    pa = ParticleAnalyzer(ParticleAnalyzer.ADD_TO_MANAGER,
                          ParticleAnalyzer.AREA | ParticleAnalyzer.SLICE,
                          rt, min_size_pix, mxsz)
    pa.setRoiManager(roim)
    roim.reset()
    rt.reset()
    pa.analyze(imp)
    rt_areas = rt.getColumn(rt.getColumnIndex("Area")).tolist()
    # print("Number of cells identified: {}".format(len(rt_areas)));
    # Paint every particle that passed the size filter white in the output.
    for idx in range(len(rt_areas)):
        roim.select(out_imp, idx)
        IJ.run(out_imp, "Set...", "value=255 slice")
    roim.reset()
    roim.close()
    imp.changes = False
    imp.close()
    return out_imp
def keep_largest_blob(imp):
    """remove all blobs other than the largest by area

    Operates slice-by-slice on the (binary) stack in imp, in place: for each
    slice, every particle except the one with the largest area is filled
    with 0.
    """
    rt = ResultsTable()
    mxsz = imp.width * imp.height
    roim = RoiManager(False)
    pa = ParticleAnalyzer(ParticleAnalyzer.ADD_TO_MANAGER,
                          ParticleAnalyzer.AREA | ParticleAnalyzer.SLICE,
                          rt, 0, mxsz)
    pa.setRoiManager(roim)
    # Analyze each stack position independently; ROI manager and results
    # table are cleared per slice so indices line up with rt_areas.
    for idx in range(1, imp.getImageStackSize() + 1):
        roim.reset()
        rt.reset()
        imp.setPosition(idx)
        pa.analyze(imp)
        rt_areas = rt.getColumn(rt.getColumnIndex("Area")).tolist()
        mx_ind = rt_areas.index(max(rt_areas))
        # Everything except the largest-area particle gets erased.
        indices_to_remove = [a for a in range(0, len(rt_areas)) if a != mx_ind]
        indices_to_remove.reverse()
        for rem_idx in indices_to_remove:
            roim.select(imp, rem_idx)
            IJ.run(imp, "Set...", "value=0 slice")
    imp.killRoi()
    roim.reset()
    roim.close()
def analyze(imp, min_area):
    """Run a particle analysis on imp and summarise the result.

    Returns a tuple (mask_image, particle_count, total_area, weighted_mean)
    where weighted_mean is the area-weighted mean of the per-particle mean
    signal (0 when no area was detected).
    """
    max_size = 1000000000000
    min_circ = 0.0
    max_circ = 1.
    results = ResultsTable()
    analyzer = PA(PA.SHOW_MASKS, PA.AREA + PA.MEAN, results,
                  min_area, max_size, min_circ, max_circ)
    analyzer.setHideOutputImage(True)
    analyzer.analyze(imp)
    # getColumn is only safe to read when at least one particle was found.
    if results.getCounter() == 0:
        particle_areas, particle_signals = [], []
    else:
        particle_areas = list(results.getColumn(0))
        particle_signals = list(results.getColumn(1))
    n_particles = len(particle_areas)
    area_sum = sum(particle_areas)
    weighted_mean = 0
    if area_sum > 0:
        weighted_mean = sum(
            a * s for a, s in zip(particle_areas, particle_signals)) / area_sum
    return analyzer.getOutputImage(), n_particles, area_sum, weighted_mean
def countParticles(imp, roim, minSize, maxSize, minCircularity, maxCircularity): # Create a table to store the results table = ResultsTable() # Create the particle analyzer pa = ParticleAnalyzer(ParticleAnalyzer.ADD_TO_MANAGER, Measurements.AREA|Measurements.MEAN, table, minSize, maxSize, minCircularity, maxCircularity) #pa = ParticleAnalyzer(ParticleAnalyzer.ADD_TO_MANAGER, Measurements.AREA|Measurements.MEAN, table, 10, Double.POSITIVE_INFINITY, 0.5, 1.0) #pa = ParticleAnalyzer(ParticleAnalyzer.ADD_TO_MANAGER, Measurements.AREA|Measurements.MEAN, table, 5, 6, 0.5, 1.0) pa.setRoiManager(roim) pa.setHideOutputImage(True) if pa.analyze(imp): print "All ok" else: print "There was a problem in analyzing", blobs areas = table.getColumn(0) intensities = table.getColumn(1) if ( (areas!=None) and (intensities!=None)): for area, intensity in zip(areas,intensities): print str(area)+": "+str(intensity)
def countParticles(imp, roim, minSize, maxSize, minCircularity, maxCircularity): # Create a table to store the results table = ResultsTable() # Create the particle analyzer pa = ParticleAnalyzer( ParticleAnalyzer.ADD_TO_MANAGER, Measurements.AREA | Measurements.MEAN | Measurements.ELLIPSE, table, minSize, maxSize, minCircularity, maxCircularity) pa.setRoiManager(roim) pa.setHideOutputImage(True) if pa.analyze(imp): print "All ok" else: print "There was a problem in analyzing", blobs areas = table.getColumn(0) intensities = table.getColumn(1) majors = table.getColumn(2)
def getParticleCenters(imp): # Create a table to store the results rt = ResultsTable() paOpts = PA.SHOW_OUTLINES \ + PA.INCLUDE_HOLES \ + PA.EXCLUDE_EDGE_PARTICLES measurements = PA.CENTROID + PA.CENTER_OF_MASS MINSIZE = 1000 MAXSIZE = Double.POSITIVE_INFINITY pa = PA(paOpts,measurements, rt, MINSIZE, MAXSIZE) pa.setHideOutputImage(True) if not pa.analyze(imp): print "There was a problem in analyzing", imp # The measured centroids are listed in the first column of the results table, as a float array: centroids_x = rt.getColumn(rt.X_CENTROID) centroids_y = rt.getColumn(rt.Y_CENTROID) coms_x = rt.getColumn(rt.X_CENTER_OF_MASS) coms_y = rt.getColumn(rt.Y_CENTER_OF_MASS) return (centroids_x,centroids_y, coms_x, coms_y)
def process(inputpath, outputpath):
    """Threshold + watershed the image at inputpath, count nuclei, and append
    one summary row to the CSV at outputpath.

    Uses the module-level threshold `t1`. The CSV header is written only when
    the output file is still empty.
    """
    imp = IJ.openImage(inputpath)
    IJ.run(
        imp, "Properties...",
        "channels=1 slices=1 frames=1 unit=um pixel_width=0.8777017 pixel_height=0.8777017 voxel_depth=25400.0508001"
    )
    IJ.setThreshold(imp, t1, 255)
    #imp.show()
    #WaitForUserDialog("Title", "Look at image").show()
    IJ.run(imp, "Convert to Mask", "")
    IJ.run(imp, "Watershed", "")
    # Counts and measures the area of particles and adds them to a table
    # called areas. Also adds them to the ROI manager.
    table = ResultsTable()
    roim = RoiManager(True)
    ParticleAnalyzer.setRoiManager(roim)
    pa = ParticleAnalyzer(ParticleAnalyzer.ADD_TO_MANAGER, Measurements.AREA,
                          table, 50, 9999999999999999, 0.2, 1.0)
    pa.setHideOutputImage(True)
    pa.analyze(imp)
    imp.changes = False
    imp.close()
    areas = table.getColumn(0)
    summary = {}
    if areas:
        # Bug fix: `filename` was undefined; derive it from the input path.
        summary['Image'] = os.path.basename(inputpath)
        summary['Nuclei.count'] = len(areas)
        summary['Area.Covered'] = sum(areas)
    fieldnames = list(summary.keys())
    with open(outputpath, 'a') as csvfile:
        writer = csv.DictWriter(csvfile, fieldnames=fieldnames,
                                extrasaction='ignore', lineterminator='\n')
        # Bug fix: the size check referenced undefined outputDirectory /
        # outputname; check the file actually being written instead.
        if os.path.getsize(outputpath) < 1:
            writer.writeheader()
        writer.writerow(summary)
def generate_background_rois(input_mask_imp, params, membrane_edges, dilations=5,
                             threshold_method=None, membrane_imp=None):
    """automatically identify background region based on auto-thresholded image, 
    existing membrane edges and position of midpoint anchor

    For each frame, the membrane polygon (extended by the midpoint anchor
    reflected across the line joining the membrane extremes) is filled into
    the mask, the mask is dilated `dilations` times and inverted, and the
    connected region whose centroid lies closest to the midpoint anchor is
    returned as that frame's background ROI (None when nothing is found).

    Fixed: `dilations` was previously hard-coded to 5 inside the loop,
    silently ignoring the parameter.
    """
    if input_mask_imp is None and membrane_imp is not None:
        segmentation_imp = Duplicator().run(membrane_imp)
        # do thresholding using either previous method if threshold_method is
        # None, or using (less conservative?) threshold method
        if (threshold_method is None
                or not (threshold_method in params.listThresholdMethods())):
            mask_imp = make_and_clean_binary(segmentation_imp, params.threshold_method)
        else:
            mask_imp = make_and_clean_binary(segmentation_imp, threshold_method)
        segmentation_imp.close()
    else:
        input_mask_imp.killRoi()
        mask_imp = Duplicator().run(input_mask_imp)
    rois = []
    IJ.setForegroundColor(0, 0, 0)
    roim = RoiManager(True)
    rt = ResultsTable()
    for fridx in range(mask_imp.getNFrames()):
        mask_imp.setT(fridx + 1)
        # add extra bit to binary mask from loaded membrane in case user
        # refined edges...
        # flip midpoint anchor across the line joining the two extremes of
        # the membrane, and fill in the triangle made by this new point and
        # those extremes
        poly = membrane_edges[fridx].getPolygon()
        l1 = (poly.xpoints[0], poly.ypoints[0])
        l2 = (poly.xpoints[-1], poly.ypoints[-1])
        M = (0.5 * (l1[0] + l2[0]), 0.5 * (l1[1] + l2[1]))
        Mp1 = (params.manual_anchor_midpoint[0][0] - M[0],
               params.manual_anchor_midpoint[0][1] - M[1])
        p2 = (M[0] - Mp1[0], M[1] - Mp1[1])
        new_poly_x = list(poly.xpoints)
        new_poly_x.append(p2[0])
        new_poly_y = list(poly.ypoints)
        new_poly_y.append(p2[1])
        mask_imp.setRoi(PolygonRoi(new_poly_x, new_poly_y, PolygonRoi.POLYGON))
        IJ.run(mask_imp, "Fill", "slice")
        mask_imp.killRoi()
        # now dilate the masked image and identify the unmasked region
        # closest to the midpoint anchor
        ip = mask_imp.getProcessor()
        # Bug fix: dilations was reassigned to 5 here, overriding the
        # parameter; now the caller-supplied value is honoured.
        for d in range(dilations):
            ip.dilate()
        ip.invert()
        mask_imp.setProcessor(ip)
        mxsz = mask_imp.getWidth() * mask_imp.getHeight()
        pa = ParticleAnalyzer(
            ParticleAnalyzer.ADD_TO_MANAGER | ParticleAnalyzer.SHOW_PROGRESS,
            ParticleAnalyzer.CENTROID, rt, 0, mxsz)
        pa.setRoiManager(roim)
        pa.analyze(mask_imp)
        # Distance from each detected region's centroid to the anchor point.
        ds_to_anchor = [
            math.sqrt((x - params.manual_anchor_midpoint[0][0])**2 +
                      (y - params.manual_anchor_midpoint[0][1])**2)
            for x, y in zip(
                rt.getColumn(rt.getColumnIndex("X")).tolist(),
                rt.getColumn(rt.getColumnIndex("Y")).tolist())
        ]
        if len(ds_to_anchor) > 0:
            roi = roim.getRoi(ds_to_anchor.index(min(ds_to_anchor)))
            rois.append(roi)
        else:
            rois.append(None)
        roim.reset()
        rt.reset()
    roim.close()
    mask_imp.close()
    return rois
def analyze(iDataSet, tbModel, p, output_folder):
    """Process one dataset: load, downscale, background-subtract, segment
    active regions, then FFT each region and record peak frequencies.

    Results (file paths and per-region frequency/amplitude numbers) are
    written back into tbModel; intermediate images are saved to
    output_folder. p supplies "scale" and "n_rois".

    NOTE(review): relies heavily on IJ.getImage() returning the window most
    recently produced by the preceding IJ.run command.
    """
    #
    # LOAD FILES
    #
    filepath = tbModel.getFileAPth(iDataSet, "RAW", "IMG")
    filename = tbModel.getFileName(iDataSet, "RAW", "IMG")
    print("Analyzing: "+filepath)
    IJ.run("Bio-Formats Importer", "open=["+filepath+"] color_mode=Default view=Hyperstack stack_order=XYCZT");
    imp = IJ.getImage()
    #
    # INIT
    #
    IJ.run("Options...", "iterations=1 count=1");
    #
    # SCALING
    #
    IJ.run(imp, "Scale...", "x="+str(p["scale"])+" y="+str(p["scale"])+" z=1.0 interpolation=Bilinear average process create");
    imp = IJ.getImage()
    # save output file
    output_file = filename+"--downscale_input.tif"
    IJ.saveAs(IJ.getImage(), "TIFF", os.path.join(output_folder, output_file))
    tbModel.setFileAPth(output_folder, output_file, iDataSet, "INPUT","IMG")
    #
    # CONVERSION
    #
    #IJ.run(imp, "8-bit", "");
    #
    # CROPPING
    #
    #imp.setRoi(392,386,750,762);
    #IJ.run(imp, "Crop", "");
    #
    # BACKGROUND SUBTRACTION
    #
    # IJ.run(imp, "Subtract...", "value=32768 stack");
    # Subtract the temporal average projection from every frame.
    IJ.run(imp, "Z Project...", "projection=[Average Intensity]");
    imp_avg = IJ.getImage()
    ic = ImageCalculator();
    imp = ic.run("Subtract create 32-bit stack", imp, imp_avg);
    #
    # REGION SEGMENTATION
    #
    # Frame-to-frame difference stack: frame i minus frame i+1.
    imp1 = Duplicator().run(imp, 1, imp.getImageStackSize()-1)
    imp2 = Duplicator().run(imp, 2, imp.getImageStackSize())
    imp_diff = ic.run("Subtract create 32-bit stack", imp1, imp2);
    #imp_diff.show()
    IJ.run(imp_diff, "Z Project...", "projection=[Standard Deviation]");
    imp_diff_sd = IJ.getImage()
    # save
    IJ.run(imp_diff_sd, "Gaussian Blur...", "sigma=5");
    output_file = filename+"--sd.tif"
    IJ.saveAs(imp_diff_sd, "TIFF", os.path.join(output_folder, output_file))
    tbModel.setFileAPth(output_folder, output_file, iDataSet, "SD","IMG")
    IJ.run(imp_diff_sd, "Enhance Contrast", "saturated=0.35");
    IJ.run(imp_diff_sd, "8-bit", "");
    IJ.run(imp_diff_sd, "Properties...", "unit=p pixel_width=1 pixel_height=1 voxel_depth=1");
    IJ.run(imp_diff_sd, "Auto Local Threshold", "method=Niblack radius=60 parameter_1=2 parameter_2=0 white");
    rm = ROIManipulator.getEmptyRm()
    IJ.run(imp_diff_sd, "Analyze Particles...", "add");
    # select N largest Rois
    diameter_roi = []
    for i in range(rm.getCount()):
        roi = rm.getRoi(i)
        diameter_roi.append([roi.getFeretsDiameter(), roi])
    # Sort by Feret's diameter, biggest first; keep the top p["n_rois"].
    diameter_roi = sorted(diameter_roi, reverse=True)
    #print diameter_roi
    rm.reset()
    for i in range(min(len(diameter_roi), p["n_rois"])):
        rm.addRoi(diameter_roi[i][1])
    # save
    output_file = filename+"--rois"
    ROIManipulator.svRoisToFl(output_folder, output_file, rm.getRoisAsArray())
    tbModel.setFileAPth(output_folder, output_file+".zip", iDataSet, "REGIONS","ROI")
    #
    # FFT in each region
    #
    IJ.run(imp, "Variance...", "radius=2 stack");
    output_file = filename+"--beats.tif"
    IJ.saveAs(imp, "TIFF", os.path.join(output_folder, output_file))
    tbModel.setFileAPth(output_folder, output_file, iDataSet, "BEATS","IMG")
    n = rm.getCount()
    for i_roi in range(n):
        imp_selection = Duplicator().run(imp)
        rm.select(imp_selection, i_roi)
        IJ.run(imp_selection, "Clear Outside", "stack");
        imp_selection.show()
        # FFT using Parallel FFTJ
        transformer = FloatTransformer(imp_selection.getStack())
        transformer.fft()
        imp_fft = transformer.toImagePlus(SpectrumType.FREQUENCY_SPECTRUM)
        imp_fft.show()
        # Analyze FFt
        IJ.run(imp_fft, "Gaussian Blur 3D...", "x=0 y=0 z=1.5");
        IJ.run(imp_fft, "Plot Z-axis Profile", "");
        output_file = filename+"--Region"+str(i_roi+1)+"--fft.tif"
        IJ.saveAs(IJ.getImage(), "TIFF", os.path.join(output_folder, output_file))
        tbModel.setFileAPth(output_folder, output_file, iDataSet, "FFT_R"+str(i_roi+1),"IMG")
        IJ.run(imp_fft, "Select All", "");
        rm.addRoi(imp_fft.getRoi())
        # NOTE(review): RoiManager.select is 0-based, so selecting
        # rm.getCount() looks off-by-one (last index is getCount()-1) —
        # verify against the RoiManager API.
        rm.select(rm.getCount())
        rt = ResultsTable()
        rt = rm.multiMeasure(imp_fft);
        #print(rt.getColumnHeadings);
        x = rt.getColumn(rt.getColumnIndex("Mean1"))
        #rm.runCommand("delete")
        # Find local maxima in the first half of the spectrum, skipping the
        # lowest x_min bins.
        peak_height_pos = []
        x_min = 10
        for i in range(x_min, len(x)/2):
            before = x[i-1]
            center = x[i]
            after = x[i+1]
            if (center>before) and (center>after):
                peak_height_pos.append([float(x[i]), i])
        if len(peak_height_pos)>0:
            # Tallest peaks first; record up to n_max frequency/amplitude
            # pairs per region into the table model.
            peak_height_pos = sorted(peak_height_pos, reverse=True)
            n_max = 3
            for i_max in range(min(len(peak_height_pos), n_max)):
                tbModel.setNumVal(round(float(len(x))/float(peak_height_pos[i_max][1]), 2), iDataSet, "F"+str(i_max+1)+"_R"+str(i_roi+1))
                tbModel.setNumVal(int(peak_height_pos[i_max][0]), iDataSet, "A"+str(i_max+1)+"_R"+str(i_roi+1))
#Analyses particles: finds all the objects that match criteria pa = ParticleAnalyzer(ParticleAnalyzer.ADD_TO_MANAGER | ParticleAnalyzer.EXCLUDE_EDGE_PARTICLES, Measurements.AREA, table, minimum_size, maximum_size, 0.1, 1.0) pa.setHideOutputImage(True) pa.analyze(channel) if thresholdMode: channel.show() WaitForUserDialog("Title", "Look at threshold for" + color[i]).show() #adds count to summary if table.getColumnIndex("Area") != -1: summary[color[i] + "-ROI-count"] = len(table.getColumn(table.getColumnIndex("Area"))) channel.changes = False channel.close() roim.reset() roim.close() # Writes everything in the output file fieldnames = ["Directory", "Filename", "Red-intensity", "Red-threshold-used", "Red-ROI-count", "Green-intensity", "Green-threshold-used", "Green-ROI-count", "Blue-intensity", "Blue-threshold-used", "Blue-ROI-count"] with open(output_name, 'a') as csvfile: writer = csv.DictWriter(csvfile, fieldnames=fieldnames, extrasaction='ignore', lineterminator = '\n') if os.path.getsize(output_name) < 1:
def process(self, imp):
    """Segment nuclei in imp and export a small multi-channel stack per cell.

    The nucleus channel is extracted, 4x binned, thresholded (Otsu) and
    watershed-separated; particle centers of mass then define boxSize x
    boxSize ROIs in the full-resolution image, which are copied slice-by-
    slice and channel-by-channel (via the system clipboard) into per-cell
    TIFF stacks under self.exportDir.
    """
    # extract nucleus channel, 8-bit and twice binned
    imp.setC(self.nucleusChannel)
    ip = imp.getChannelProcessor().duplicate()
    ip = ip.convertToByteProcessor()
    ip = ip.bin(4)
    nucleus = ImagePlus("nucleus_channel", ip)
    # threshold image and separate clumped nuclei
    IJ.run(nucleus, "Auto Threshold", "method=Otsu white setthreshold show");
    IJ.run(nucleus, "Make Binary", "thresholded remaining black");
    IJ.run(nucleus, "Watershed", "");
    # Derive an export directory name from the image title, sanitising
    # characters that are awkward in file names.
    directory = imp.getTitle()
    directory = directory.replace(" ", "_")\
        .replace(",", "_")\
        .replace("#", "_series")\
        .replace("...", "")\
        .replace(".", "_")
    directory = os.path.join(self.exportDir, directory)
    sliceDirectory = os.path.join(directory, "slices")
    print directory
    print sliceDirectory
    if not os.path.exists(sliceDirectory):
        os.makedirs(sliceDirectory)
    # Create a table to store the results
    table = ResultsTable()
    # Create a hidden ROI manager, to store a ROI for each blob or cell
    #roim = RoiManager(True)
    # remove small particles and border particles
    pa = ParticleAnalyzer(
        ParticleAnalyzer.ADD_TO_MANAGER | ParticleAnalyzer.EXCLUDE_EDGE_PARTICLES,
        Measurements.CENTER_OF_MASS,
        table,
        self.minArea, self.maxArea,
        0.0, 1.0)
    if pa.analyze(nucleus):
        print "All ok, number of particles: ", table.size()
    else:
        print "There was a problem in analyzing", imp, nucleus
    table.save(os.path.join(directory, "rt.csv"))
    # read the center of mass coordinates
    cmx = table.getColumn(0)
    cmy = table.getColumn(1)
    if self.debug:
        imp.show()
    i = 0
    for i in range(0, min(self.nCells, table.size())):
        # ROI around the cell
        cmx = table.getValue("XM", i)
        cmy = table.getValue("YM", i)
        # The factor 4 maps binned-nucleus coordinates back to full
        # resolution (the nucleus image was binned by 4 above).
        x = 4 * cmx - (self.boxSize - 1) / 2
        y = 4 * cmy - (self.boxSize - 1) / 2
        # Skip cells whose box would fall within self.edge of the border.
        if (x < self.edge or y < self.edge or x > imp.getWidth() - self.edge or y > imp.getHeight() - self.edge):
            continue
        roi = Roi(x, y, self.boxSize, self.boxSize)
        imp.setRoi(roi, False)
        cellStack = ImageStack(self.boxSize, self.boxSize)
        for z in range(1, imp.getNSlices() + 1):
            imp.setSlice(z)
            for c in range(1, imp.getNChannels() + 1):
                imp.setC(c)
                # copy ROI to stack
                imp.copy()
                impSlice = imp.getClipboard()
                cellStack.addSlice(impSlice.getProcessor())
                if self.slices:
                    sliceTitle = "cell_%s_z%s_c%s" % (str(i).zfill(4), str(z).zfill(3), str(c))
                    print sliceTitle
                    IJ.saveAsTiff(impSlice, os.path.join(sliceDirectory, sliceTitle))
                impSlice.close()
        title = "cell_" + str(i).zfill(4)
        cell = ImagePlus(title, cellStack)
        # save ROI image
        IJ.saveAsTiff(cell, os.path.join(directory, title))
        cell.close()
        if self.debug:
            imp.updateAndDraw()
            wait = Wait("particle done")
            wait.show()
def updatepressed(event):
    """Re-run particle analysis on the current image and stretch the slider
    ranges so they cover the measured area/mean/major/minor extremes.

    Uses a MaxEntropy auto-threshold, stores the resulting ROIs in
    self.__roisArray, and widens each min/max slider whose current maximum
    is smaller than the newly measured values require.
    """
    self.__image = IJ.getImage()
    rm = RoiManager.getInstance()
    if (rm == None):
        rm = RoiManager()
    rm.runCommand("reset")
    self.__image.killRoi()
    IJ.run("Threshold...")
    IJ.setAutoThreshold(self.__image, "MaxEntropy")
    rt = ResultsTable()
    pa = ParticleAnalyzer(ParticleAnalyzer.ADD_TO_MANAGER+ParticleAnalyzer.CLEAR_WORKSHEET, Measurements.AREA+Measurements.ELLIPSE+Measurements.MEAN, rt, 0.00, 10000.00, 0.00, 1.00)
    pa.analyze(self.__image)
    self.__roisArray = []
    self.__roisArray = rm.getRoisAsArray()
    #for i in range(rm.getCount()) :
    #	rm.select(i)
    #	rm.runCommand("Set Color", "0000FF", 2)
    IJ.resetThreshold(self.__image)
    rt.show("tempRT")
    # Measured columns used to size the sliders below.
    areas = rt.getColumn(ResultsTable.AREA)
    means = rt.getColumn(ResultsTable.MEAN)
    majors = rt.getColumn(ResultsTable.MAJOR)
    minors = rt.getColumn(ResultsTable.MINOR)
    #print 0
    if self.__slidersDict["Area_max"].getMaximum() < int(max(areas)+1):
        # print 1
        self.__slidersDict["Area_max"].setMaximum(int(max(areas))+1)
    if self.__slidersDict["Area_min"].getMaximum() < int(max(areas)+1):
        # print 2
        self.__slidersDict["Area_min"].setMaximum(int(max(areas))+1)
    if self.__slidersDict["Mean_max"].getMaximum() < int(max(means)+1):
        # print 3
        self.__slidersDict["Mean_max"].setMaximum(int(max(means))+1)
    if self.__slidersDict["Mean_min"].getMaximum() < int(max(means)+1):
        # print 4
        self.__slidersDict["Mean_min"].setMaximum(int(max(means))+1)
    # NOTE(review): this comparison omits the +1 used by every other
    # check — possibly an inconsistency; confirm intended behaviour.
    if self.__slidersDict["Major_max"].getMaximum() < int(max(majors)):
        # print 5
        self.__slidersDict["Major_max"].setMaximum(int(max(majors))+1)
    if self.__slidersDict["Major_min"].getMaximum() < int(max(majors)+1):
        # print 6
        self.__slidersDict["Major_min"].setMaximum(int(max(majors))+1)
    if self.__slidersDict["Minor_max"].getMaximum() < int(max(minors)+1):
        # print 7
        self.__slidersDict["Minor_max"].setMaximum(int(max(minors))+1)
    if self.__slidersDict["Minor_min"].getMaximum() < int(max(minors)+1):
        # print 8
        self.__slidersDict["Minor_min"].setMaximum(int(max(minors))+1)
    # Aspect-ratio sliders bounded by max(major)/min(minor).
    if self.__slidersDict["AR_max"].getMaximum() < int((max(majors)+1)/min(minors)+1):
        # print 9
        self.__slidersDict["AR_max"].setMaximum(int((max(majors)+1)/(min(minors))))
    if self.__slidersDict["AR_min"].getMaximum() < int((max(majors)+1)/min(minors)):
        # print 10
        self.__slidersDict["AR_min"].setMaximum(int((max(majors)+1)/(min(minors))))
    #print 11
    for sb in self.__slidersDict.values():
        sb.repaint()
    #rm.runCommand("reset")
    #temprois=self.getIncludeRois()
    #IJ.run(self.__image, "Remove Overlay", "")
    #o=Overlay()
    #for roi in temprois:
    #	o.addElement(roi)
    #self.__image.killRoi()
    #self.__image.setOverlay(o)
    self.__image.updateAndDraw()
def process(subDir, subsubDir, outputDirectory, filename):
    """Process one multi-channel well image set and append a summary CSV row.

    Counts nuclei on the d0 image (threshold + watershed + particle
    analysis), then, for every entry in the module-level `channels` list,
    measures per-nucleus intensities and area fractions on the matching
    d<N>.TIF image. Classifies each nucleus as positive/negative per marker
    (and marker co-positivity) and writes one row to
    outputDirectory/outputName.csv.

    Depends on module-level globals: inputDirectory, channels, lowerBounds,
    displayImages, nameArray, tooBigThreshold, tooSmallThreshold,
    areaFractionThreshold, outputName.
    """
    subFolder = subDir + "/" + subsubDir
    # Opens the d0 image and sets default properties
    imp = IJ.openImage(inputDirectory + subFolder + '/' + filename)
    IJ.run(
        imp, "Properties...",
        "channels=1 slices=1 frames=1 unit=um pixel_width=0.8777017 pixel_height=0.8777017 voxel_depth=25400.0508001"
    )
    # Sets the threshold and watersheds. for more details on image processing, see https://imagej.nih.gov/ij/developer/api/ij/process/ImageProcessor.html
    ic = ImageConverter(imp)
    ic.convertToGray8()
    imp.updateAndDraw()
    # Blur detection: convolve with a Laplacian-style kernel and inspect
    # the statistics of the response.
    dup = imp.duplicate()
    IJ.run(
        dup, "Convolve...",
        "text1=[-1 -1 -1 -1 -1\n-1 -1 -1 -1 -1\n-1 -1 24 -1 -1\n-1 -1 -1 -1 -1\n-1 -1 -1 -1 -1\n] normalize"
    )
    stats = dup.getStatistics(Measurements.MEAN | Measurements.MIN_MAX | Measurements.STD_DEV)
    dup.close()
    blurry = (stats.mean < 18 and stats.stdDev < 22) or stats.max < 250
    IJ.setThreshold(imp, lowerBounds[0], 255)
    IJ.run(imp, "Convert to Mask", "")
    IJ.run(imp, "Watershed", "")
    if displayImages:
        imp.show()
        WaitForUserDialog("Title", "Look at image").show()
    # Counts and measures the area of particles and adds them to a table
    # called areas. Also adds them to the ROI manager
    table = ResultsTable()
    roim = RoiManager(True)
    ParticleAnalyzer.setRoiManager(roim)
    pa = ParticleAnalyzer(ParticleAnalyzer.ADD_TO_MANAGER, Measurements.AREA,
                          table, 15, 9999999999999999, 0.2, 1.0)
    pa.setHideOutputImage(True)
    pa.analyze(imp)
    if not displayImages:
        imp.changes = False
        imp.close()
    areas = table.getColumn(0)
    # This loop goes through the remaining channels for the other markers,
    # by replacing the d0 at the end with its corresponding channel
    # It will save all the area fractions into a 2d array called
    # areaFractionsArray
    areaFractionsArray = []
    areaMeansArray = []
    means = []
    totalAreas = []
    for chan in channels:
        v, x = chan
        # Opens each image and thresholds
        imp = IJ.openImage(inputDirectory + subFolder + '/' + filename.replace("d0.TIF", "d" + str(x) + ".TIF"))
        IJ.run(
            imp, "Properties...",
            "channels=1 slices=1 frames=1 unit=um pixel_width=0.8777017 pixel_height=0.8777017 voxel_depth=25400.0508001"
        )
        ic = ImageConverter(imp)
        ic.convertToGray8()
        imp.updateAndDraw()
        # Whole-image mean intensity for this channel.
        stats = imp.getStatistics(Measurements.MEAN)
        means.append(stats.mean)
        # Per-nucleus mean intensity, using the nucleus ROIs found above.
        areaMeans = []
        for roi in roim.getRoisAsArray():
            imp.setRoi(roi)
            stats = imp.getStatistics(Measurements.MEAN)
            areaMeans.append(stats.mean)
        IJ.setThreshold(imp, lowerBounds[x], 255)
        IJ.run(imp, "Convert to Mask", "")
        if displayImages:
            imp.show()
            WaitForUserDialog("Title", "Look at image").show()
        stats = imp.getStatistics(Measurements.AREA_FRACTION)
        totalAreas.append(stats.areaFraction)
        # Measures the area fraction of the new image for each ROI from the
        # ROI manager.
        areaFractions = []
        for roi in roim.getRoisAsArray():
            imp.setRoi(roi)
            stats = imp.getStatistics(Measurements.AREA_FRACTION)
            areaFractions.append(stats.areaFraction)
        # Saves the results in areaFractionArray
        areaFractionsArray.append(areaFractions)
        areaMeansArray.append(sum(areaMeans) / len(areaMeans))
        if not displayImages:
            imp.changes = False
            imp.close()
    roim.close()
    # Figures out what well the image is a part of
    ind = filename.index("p00_0_")
    row = filename[ind + 6:ind + 7]
    column = str(int(filename[ind + 7:ind + 9]))
    # Creates the summary dictionary which will correspond to a single row
    # in the output csv, with each key being a column
    summary = {}
    # Finds the name of the well from the nameArray 2d array
    if row in nameArray:
        if column in nameArray[row]:
            summary['Name'] = nameArray[row][column]
    summary['Image'] = filename
    summary['Directory'] = subDir
    summary['SubDirectory'] = subsubDir
    summary['Row'] = row
    summary['Column'] = column
    # Adds usual columns
    summary['size-average'] = 0
    summary['#nuclei'] = 0
    summary['all-negative'] = 0
    summary['too-big-(>' + str(tooBigThreshold) + ')'] = 0
    summary['too-small-(<' + str(tooSmallThreshold) + ')'] = 0
    summary['image-quality'] = blurry
    # Creates the fieldnames variable needed to create the csv file at the
    # end.
    fieldnames = [
        'Name', 'Directory', 'SubDirectory', 'Image', 'Row', 'Column',
        'size-average', 'image-quality',
        'too-big-(>' + str(tooBigThreshold) + ')',
        'too-small-(<' + str(tooSmallThreshold) + ')', '#nuclei',
        'all-negative'
    ]
    # Adds the columns for each individual marker (ignoring Dapi since it
    # was used to count nuclei)
    for chan in channels:
        v, x = chan
        summary[v + "-positive"] = 0
        summary[v + "-intensity"] = means[x]
        summary[v + "-area"] = totalAreas[x]
        summary[v + "-intensity-in-nuclei"] = areaMeansArray[x]
        summary[v + "-area-fraction-in-nuclei"] = sum(
            areaFractionsArray[x]) / len(areaFractionsArray[x])
        fieldnames.append(v + "-positive")
        fieldnames.append(v + "-intensity")
        fieldnames.append(v + "-area")
        fieldnames.append(v + "-intensity-in-nuclei")
        fieldnames.append(v + "-area-fraction-in-nuclei")
    # Adds the column for colocalization between first and second marker
    if len(channels) > 2:
        summary[channels[1][0] + '-' + channels[2][0] + '-positive'] = 0
        fieldnames.append(channels[1][0] + '-' + channels[2][0] + '-positive')
    # Adds the columns for colocalization between all three markers
    if len(channels) > 3:
        summary[channels[1][0] + '-' + channels[3][0] + '-positive'] = 0
        summary[channels[2][0] + '-' + channels[3][0] + '-positive'] = 0
        summary[channels[1][0] + '-' + channels[2][0] + '-' + channels[3][0] + '-positive'] = 0
        fieldnames.append(channels[1][0] + '-' + channels[3][0] + '-positive')
        fieldnames.append(channels[2][0] + '-' + channels[3][0] + '-positive')
        fieldnames.append(channels[1][0] + '-' + channels[2][0] + '-' + channels[3][0] + '-positive')
    # Loops through each particle and adds it to each field that it is True
    # for.
    areaCounter = 0
    if not (areas is None):
        for z, area in enumerate(areas):
            if not (area is None or summary is None):
                if area > tooBigThreshold:
                    summary['too-big-(>' + str(tooBigThreshold) + ')'] += 1
                elif area < tooSmallThreshold:
                    summary['too-small-(<' + str(tooSmallThreshold) + ')'] += 1
                else:
                    summary['#nuclei'] += 1
                    areaCounter += area
                    # Count per-marker positivity; temp tracks whether any
                    # non-nucleus channel was positive for this particle.
                    temp = 0
                    for y, chan in enumerate(channels):
                        v, x = chan
                        if areaFractionsArray[y][z] > areaFractionThreshold:
                            summary[chan[0] + '-positive'] += 1
                            if x != 0:
                                temp += 1
                    if temp == 0:
                        summary['all-negative'] += 1
                    if len(channels) > 2:
                        if areaFractionsArray[1][z] > areaFractionThreshold:
                            if areaFractionsArray[2][z] > areaFractionThreshold:
                                summary[channels[1][0] + '-' + channels[2][0] + '-positive'] += 1
                    if len(channels) > 3:
                        if areaFractionsArray[1][z] > areaFractionThreshold:
                            if areaFractionsArray[3][z] > areaFractionThreshold:
                                summary[channels[1][0] + '-' + channels[3][0] + '-positive'] += 1
                        if areaFractionsArray[2][z] > areaFractionThreshold:
                            if areaFractionsArray[3][z] > areaFractionThreshold:
                                summary[channels[2][0] + '-' + channels[3][0] + '-positive'] += 1
                                if areaFractionsArray[1][z] > areaFractionThreshold:
                                    summary[channels[1][0] + '-' + channels[2][0] + '-' + channels[3][0] + '-positive'] += 1
    # Calculate the average of the particles sizes
    if float(summary['#nuclei']) > 0:
        summary['size-average'] = round(areaCounter / summary['#nuclei'], 2)
    # Opens and appends one line on the final csv file for the subfolder
    # (remember that this is still inside the loop that goes through each
    # image)
    with open(outputDirectory + "/" + outputName + ".csv", 'a') as csvfile:
        writer = csv.DictWriter(csvfile, fieldnames=fieldnames,
                                extrasaction='ignore', lineterminator='\n')
        if os.path.getsize(outputDirectory + "/" + outputName + ".csv") < 1:
            writer.writeheader()
        writer.writerow(summary)
table.setValue("TRACK_ID", rowNumber, id) table.setValue("POSITION_X", rowNumber, x) table.setValue("POSITION_Y", rowNumber, y) table.setValue("FRAME", rowNumber, t) table.setValue("MEAN_INTENSITY", rowNumber, mean) table.setValue("STANDARD_DEVIATION", rowNumber, std) table.setValue("SNR", rowNumber, snr) rowNumber = rowNumber + 1 # roi1 = PointRoi(x/dx, y/dy) # roi1.setPosition(int(t)) # rm.add(imp, roi1, nextRoi) # nextRoi = nextRoi+1 frame = table.getColumn(3) mean = table.getColumn(4) std = table.getColumn(5) snr = table.getColumn(6) var = [s / m for s, m in zip(std, mean)] from collections import Counter as Counter idxvec = [ item for item, count in Counter(frame).items() if count > 1 ] if idxvec == []: continue division = min(idxvec) idx = frame.index(division) + 1 mean = mean[:idx]
def process(subFolder, outputDirectory, filename):
    """Segment and quantify one brightfield IHC image.

    Pipeline (all via ImageJ commands, so window/selection state matters):
      1. open the image, fix pixel calibration, threshold to get a tissue ROI;
      2. "Colour Deconvolution" (H DAB) to split stain channels;
      3. per-channel mean intensity / area over the tissue ROI;
      4. nuclei count on channel 0 via ParticleAnalyzer;
      5. per-channel area fractions per nucleus, then DAB blob/cell counts;
      6. assemble a `summary` dict and append it as one row to the output CSV.

    Parameters
    ----------
    subFolder : str        subdirectory (relative to global `inputDirectory`)
    outputDirectory : str  NOTE(review): not used in the body; output goes to
                           the global `outputName` — confirm intended.
    filename : str         image file name inside `subFolder`

    Relies on module-level globals: inputDirectory, channels, blur,
    lowerBounds, displayImages, region, tooSmallThreshold(DAB),
    tooBigThreshold(DAB), areaFractionThreshold, info, allMaxThresholds,
    outputName, and helpers getImage / setTempCurrentImage.
    """
    imp = IJ.openImage(inputDirectory + subFolder + '/' + filename)
    imp.show()
    # Hard-coded Leica pixel size (um); overrides whatever the TIFF header says.
    IJ.run(
        imp, "Properties...",
        "channels=1 slices=1 frames=1 unit=um pixel_width=0.8777017 pixel_height=0.8777017 voxel_depth=25400.0508001"
    )
    ic = ImageConverter(imp)
    # Duplicate BEFORE the 8-bit conversion so colour deconvolution below
    # still sees the original RGB data.
    dup = imp.duplicate()
    dup_title = dup.getTitle()
    ic.convertToGray8()
    imp.updateAndDraw()
    IJ.run("Threshold...")
    IJ.setThreshold(218, 245)
    IJ.run(imp, "Convert to Mask", "")
    rm = RoiManager()
    # Re-threshold the binary mask at 0 and convert it to a single selection
    # outlining the tissue; store it in the ROI manager.
    imp.getProcessor().setThreshold(0, 0, ImageProcessor.NO_LUT_UPDATE)
    boundroi = ThresholdToSelection.run(imp)
    rm.addRoi(boundroi)
    imp.changes = False
    imp.close()
    # Fixed-size slots indexed by channel index x (max 5 channels).
    images = [None] * 5
    intensities = [None] * 5
    blobsarea = [None] * 5
    blobsnuclei = [None] * 5
    cells = [None] * 5
    bigareas = [None] * 5
    # Split stains; Colour_3 is the residual channel and is discarded.
    IJ.run(dup, "Colour Deconvolution", "vectors=[H DAB]")
    images[0] = getImage(dup_title + "-(Colour_1)")
    images[1] = getImage(dup_title + "-(Colour_2)")
    images[2] = getImage(dup_title + "-(Colour_3)")
    images[2].close()
    # Mean intensity and area of the whole-tissue ROI, per channel.
    # `channels` entries are (label, index) pairs.
    for chan in channels:
        v, x = chan
        imp = images[x]
        imp.show()
        for roi in rm.getRoiManager().getRoisAsArray():
            imp.setRoi(roi)
            stats = imp.getStatistics(Measurements.MEAN | Measurements.AREA)
            # NOTE(review): overwritten on each ROI — effectively keeps the
            # stats of the LAST ROI in the manager; confirm only one ROI exists.
            intensities[x] = stats.mean
            bigareas[x] = stats.area
        rm.runCommand(imp, "Show None")
    rm.close()
    # Opens the ch00 image and sets default properties
    imp = images[0].duplicate()
    IJ.run(
        imp, "Properties...",
        "channels=1 slices=1 frames=1 unit=um pixel_width=0.8777017 pixel_height=0.8777017 voxel_depth=25400.0508001"
    )
    # Sets the threshold and watersheds. for more details on image processing,
    # see https://imagej.nih.gov/ij/developer/api/ij/process/ImageProcessor.html
    imp.show()
    setTempCurrentImage(imp)
    ic = ImageConverter(imp)
    imp.updateAndDraw()
    IJ.run(imp, "Gaussian Blur...", "sigma=" + str(blur))
    imp.updateAndDraw()
    imp.show()
    IJ.run("Threshold...")
    IJ.setThreshold(30, lowerBounds[0])
    if displayImages:
        imp.show()
        WaitForUserDialog(
            "Title", "Adjust threshold for nuclei. Current region is: " + region).show()
    IJ.run(imp, "Convert to Mask", "")
    # Counts and measures the area of particles and adds them to a table
    # called areas. Also adds them to the ROI manager
    table = ResultsTable()
    roim = RoiManager()
    # min size 5 px, min circularity 0.05 — nuclei detection parameters.
    pa = ParticleAnalyzer(ParticleAnalyzer.ADD_TO_MANAGER, Measurements.AREA,
                          table, 5, 9999999999999999, 0.05, 1.0)
    pa.setHideOutputImage(True)
    imp = IJ.getImage()
    # imp.getProcessor().invert()
    pa.analyze(imp)
    imp.changes = False
    imp.close()
    # Column 0 of the table is the per-particle area (or None if no particles).
    areas = table.getColumn(0)
    # This loop goes through the remaining channels for the other markers, by
    # replacing the ch00 at the end with its corresponding channel
    # It will save all the area fractions into a 2d array called areaFractionsArray
    areaFractionsArray = [None] * 5
    maxThresholds = []
    for chan in channels:
        v, x = chan
        # Opens each image and thresholds
        imp = images[x]
        IJ.run(
            imp, "Properties...",
            "channels=1 slices=1 frames=1 unit=um pixel_width=0.8777017 pixel_height=0.8777017 voxel_depth=25400.0508001"
        )
        imp.show()
        setTempCurrentImage(imp)
        ic = ImageConverter(imp)
        ic.convertToGray8()
        imp.updateAndDraw()
        # NOTE(review): `rm` was closed above; these Show None/All toggles look
        # like display-refresh workarounds — confirm they still have an effect.
        rm.runCommand(imp, "Show None")
        rm.runCommand(imp, "Show All")
        rm.runCommand(imp, "Show None")
        imp.show()
        IJ.selectWindow(imp.getTitle())
        IJ.run("Threshold...")
        IJ.setThreshold(20, lowerBounds[x])
        if displayImages:
            WaitForUserDialog(
                "Title", "Adjust threshold for " + v +
                ". Current region is: " + region).show()
        ip = imp.getProcessor()
        # Record the (possibly user-adjusted) upper threshold for reporting.
        maxThresholds.append(ip.getMaxThreshold())
        IJ.run(imp, "Convert to Mask", "")
        # Measures the area fraction of the new image for each ROI from the
        # ROI manager.
        areaFractions = []
        for roi in roim.getRoiManager().getRoisAsArray():
            imp.setRoi(roi)
            stats = imp.getStatistics(Measurements.AREA_FRACTION)
            areaFractions.append(stats.areaFraction)
        # Saves the results in areaFractionArray
        areaFractionsArray[x] = areaFractions
    roim.close()
    # Second particle pass per channel: count DAB-positive blobs and the
    # subset within the DAB size window ("cells").
    for chan in channels:
        v, x = chan
        imp = images[x]
        imp.deleteRoi()
        imp.updateAndDraw()
        setTempCurrentImage(imp)
        roim = RoiManager()
        pa = ParticleAnalyzer(ParticleAnalyzer.ADD_TO_MANAGER, Measurements.AREA,
                              table, 15, 9999999999999999, 0.2, 1.0)
        pa.analyze(imp)
        blobs = []
        cell = []
        for roi in roim.getRoiManager().getRoisAsArray():
            imp.setRoi(roi)
            stats = imp.getStatistics(Measurements.AREA)
            blobs.append(stats.area)
            if stats.area > tooSmallThresholdDAB and stats.area < tooBigThresholdDAB:
                cell.append(stats.area)
        blobsarea[x] = sum(blobs)
        blobsnuclei[x] = len(blobs)
        cells[x] = len(cell)
        imp.changes = False
        imp.close()
        roim.reset()
        roim.close()
    # Creates the summary dictionary which will correspond to a single row in
    # the output csv, with each key being a column
    summary = {}
    summary['Image'] = filename
    summary['Directory'] = subFolder
    # Adds usual columns
    summary['size-average'] = 0
    summary['#nuclei'] = 0
    summary['all-negative'] = 0
    summary['too-big-(>' + str(tooBigThreshold) + ')'] = 0
    summary['too-small-(<' + str(tooSmallThreshold) + ')'] = 0
    # Creates the fieldnames variable needed to create the csv file at the end.
    fieldnames = [
        'Directory', 'Image', 'size-average',
        'too-big-(>' + str(tooBigThreshold) + ')',
        'too-small-(<' + str(tooSmallThreshold) + ')', '#nuclei', 'all-negative'
    ]
    # Join in per-animal metadata: the animal ID is parsed from the filename
    # by treating 's' and 'p' as extra '-' separators.
    for row in info:
        if row['Animal ID'] == filename.replace('s', '-').replace(
                'p', '-').split('-')[0]:
            for key, value in row.items():
                fieldnames.insert(0, key)
                summary[key] = value
    # Adds the columns for each individual marker (ignoring Dapi since it was
    # used to count nuclei)
    summary["tissue-area"] = bigareas[0]
    fieldnames.append("tissue-area")
    for chan in channels:
        v, x = chan
        summary[v + "-HEMO-cells"] = 0
        fieldnames.append(v + "-HEMO-cells")
        summary[v + "-intensity"] = intensities[x]
        fieldnames.append(v + "-intensity")
        summary[v + "-area"] = blobsarea[x]
        fieldnames.append(v + "-area")
        summary[v + "-area/tissue-area"] = blobsarea[x] / bigareas[0]
        fieldnames.append(v + "-area/tissue-area")
        summary[v + "-particles"] = blobsnuclei[x]
        fieldnames.append(v + "-particles")
        summary[v + "-cells"] = cells[x]
        fieldnames.append(v + "-cells")
        summary[v + "-particles/tissue-area"] = blobsnuclei[x] / bigareas[0]
        fieldnames.append(v + "-particles/tissue-area")
        fieldnames.append(v + "-HEMO-Cells/tissue-area")
    # Adds the column for colocalization between first and second marker
    if len(channels) > 2:
        summary[channels[1][0] + '-' + channels[2][0] + '-positive'] = 0
        fieldnames.append(channels[1][0] + '-' + channels[2][0] + '-positive')
    # Adds the columns for colocalization between all three markers
    if len(channels) > 3:
        summary[channels[1][0] + '-' + channels[3][0] + '-positive'] = 0
        summary[channels[2][0] + '-' + channels[3][0] + '-positive'] = 0
        summary[channels[1][0] + '-' + channels[2][0] + '-' + channels[3][0] +
                '-positive'] = 0
        fieldnames.append(channels[1][0] + '-' + channels[3][0] + '-positive')
        fieldnames.append(channels[2][0] + '-' + channels[3][0] + '-positive')
        fieldnames.append(channels[1][0] + '-' + channels[2][0] + '-' +
                          channels[3][0] + '-positive')
    # Loops through each particle and adds it to each field that it is True for.
    areaCounter = 0
    for z, area in enumerate(areas):
        if area > tooBigThreshold:
            summary['too-big-(>' + str(tooBigThreshold) + ')'] += 1
        elif area < tooSmallThreshold:
            summary['too-small-(<' + str(tooSmallThreshold) + ')'] += 1
        else:
            summary['#nuclei'] += 1
            areaCounter += area
            # temp counts non-channel-0 markers this nucleus is positive for.
            temp = 0
            for chan in channels:
                v, x = chan
                # NOTE(review): threshold index [0] is used for every channel
                # here but per-channel indices are used below — confirm intended.
                if areaFractionsArray[x][z] > areaFractionThreshold[0]:
                    summary[chan[0] + '-HEMO-cells'] += 1
                    if x != 0:
                        temp += 1
            if temp == 0:
                summary['all-negative'] += 1
            # Pairwise and triple colocalization counts.
            if len(channels) > 2:
                if areaFractionsArray[1][z] > areaFractionThreshold[1]:
                    if areaFractionsArray[2][z] > areaFractionThreshold[2]:
                        summary[channels[1][0] + '-' + channels[2][0] +
                                '-positive'] += 1
            if len(channels) > 3:
                if areaFractionsArray[1][z] > areaFractionThreshold[1]:
                    if areaFractionsArray[3][z] > areaFractionThreshold[3]:
                        summary[channels[1][0] + '-' + channels[3][0] +
                                '-positive'] += 1
                if areaFractionsArray[2][z] > areaFractionThreshold[2]:
                    if areaFractionsArray[3][z] > areaFractionThreshold[3]:
                        summary[channels[2][0] + '-' + channels[3][0] +
                                '-positive'] += 1
                        if areaFractionsArray[1][z] > areaFractionThreshold[1]:
                            summary[channels[1][0] + '-' + channels[2][0] + '-' +
                                    channels[3][0] + '-positive'] += 1
    # Calculate the average of the particles sizes
    for chan in channels:
        v, x = chan
        summary[v + "-cells/tissue-area"] = summary[v + "-cells"] / bigareas[0]
    if float(summary['#nuclei']) > 0:
        summary['size-average'] = round(areaCounter / summary['#nuclei'], 2)
    # In threshold-tuning mode only the chosen thresholds are reported.
    if displayImages:
        fieldnames = ["Directory", "Image"]
        for chan in channels:
            v, x = chan
            summary[v + "-threshold"] = maxThresholds[x]
            fieldnames.append(v + "-threshold")
            allMaxThresholds[v + "-" + region].append(maxThresholds[x])
    # Opens and appends one line on the final csv file for the subfolder
    # (remember that this is still inside the loop that goes through each image)
    with open(outputName, 'a') as csvfile:
        writer = csv.DictWriter(csvfile,
                                fieldnames=fieldnames,
                                extrasaction='ignore',
                                lineterminator='\n')
        # Write the header only when the file is still empty.
        if os.path.getsize(outputName) < 1:
            writer.writeheader()
        writer.writerow(summary)
| Measurements.CENTROID | Measurements.ELLIPSE, table, minimum_size, 9999999999999999, 0.1, 1.0) pa.setHideOutputImage(True) pa.analyze(imp) index = -1 maxArea = -1 # Check if Column even exists (in case it didn't measure anything) if table.getColumnIndex("Area") != -1: # Find the ROI with the largest area for i, area in enumerate( table.getColumn(table.getColumnIndex("Area"))): if area > maxArea: index = i if thresholdMode: imp.show() # Writes everything in the output file if index != -1: diameter = 2 * math.sqrt( float(table.getValue("Area", index)) / (2 * math.pi)) isOrganoid = table.getValue( "Area", index) > area_threshold and table.getValue( "Area", index) > round_threshold output.write(
def processImages(cfg, wellName, wellPath, images):
    """Segment every channel/Z/T image of one well and collect per-blob stats.

    For each (channel c, z-slice z, timepoint t): load the image (splitting
    RGB channels for png fluorescence inputs), threshold it into a mask via
    ELMImageUtils.getThresholdedMask, run a ParticleAnalyzer, filter blobs by
    area thresholds, erase the filtered blobs from the mask, and write debug /
    segmentation / overlay PNGs into the well's output directory.

    Parameters
    ----------
    cfg : ELMConfig wrapper — all tunables are read through cfg.getValue().
    wellName : str  used in output file names
    wellPath : str  output directory for this well
    images : nested list indexed [c][z][t] of image file paths

    Returns
    -------
    stats : list[c][z][t] of dict mapping result-column heading -> values
            (plus ELMConfig.UM_AREA -> filtered areas in um^2).
    """
    stats = [[[dict() for t in range(cfg.getValue(ELMConfig.numT))]
              for z in range(cfg.getValue(ELMConfig.numZ))]
             for c in range(cfg.getValue(ELMConfig.numChannels))]
    # Wall-clock timings per pipeline stage, for the summary print at the end.
    times = {}
    for c in range(0, cfg.getValue(ELMConfig.numChannels)):
        chanStr = 'ch%(channel)02d' % {"channel" : c};
        chanName = cfg.getValue(ELMConfig.chanLabel)[c]
        # Set some config based upon channel
        if (cfg.getValue(ELMConfig.chanLabel)[c] in cfg.getValue(ELMConfig.chansToSkip)):
            continue
        if (cfg.getValue(ELMConfig.chanLabel)[c] == ELMConfig.BRIGHTFIELD):
            minCircularity = 0.001
            # We want to identify one big cell ball, so ignore small less circular objects
            if cfg.params[ELMConfig.imgType] == "png":
                minSize = 5;
            else:
                minSize = 500
        elif (cfg.getValue(ELMConfig.chanLabel)[c] == ELMConfig.BLUE) \
                or (cfg.getValue(ELMConfig.chanLabel)[c] == ELMConfig.RED) \
                or (cfg.getValue(ELMConfig.chanLabel)[c] == ELMConfig.GREEN):
            # minCircularity = 0.001
            # NOTE(review): minCircularity is NOT set on this branch — it keeps
            # whatever value a previous channel left behind (NameError if this
            # is the first non-skipped channel). Confirm intended.
            minSize = 5
        elif (cfg.getValue(ELMConfig.chanLabel)[c] == ELMConfig.YELLOW):
            minCircularity = 0.001
            minSize = 5
        # Process images in Z stack
        for z in range(0, cfg.getValue(ELMConfig.numZ)):
            zStr = cfg.getZStr(z);
            for t in range(0, cfg.getValue(ELMConfig.numT)):
                tStr = cfg.getTStr(t)
                if (cfg.getValue(ELMConfig.imgType) == "png"):
                    # Brightfield uses the whole iamge
                    if (cfg.getValue(ELMConfig.chanLabel)[c] == ELMConfig.BRIGHTFIELD):
                        currIP = IJ.openImage(images[c][z][t][0])
                    else:  # otherwise, we'll plit off channels
                        chanIdx = 2
                        if (cfg.getValue(ELMConfig.chanLabel)[c] == ELMConfig.RED):
                            chanIdx = 0
                        elif (cfg.getValue(ELMConfig.chanLabel)[c] == ELMConfig.GREEN):
                            chanIdx = 1;
                        img = IJ.openImage(images[c][z][t][0])
                        imgChanns = ChannelSplitter.split(img);
                        img.close()
                        currIP = imgChanns[chanIdx];
                else:
                    currIP = IJ.openImage(images[c][z][t][0])
                # Keep an untouched copy for measurement redirection / overlay.
                resultsImage = currIP.duplicate()
                dbgOutDesc = wellName + "_" + zStr + "_" + chanStr + "_" + tStr
                # Time series get their own images/ subdirectory.
                if (cfg.getValue(ELMConfig.numT) > 1):
                    outputPath = os.path.join(wellPath, "images")
                    if not os.path.exists(outputPath):
                        os.makedirs(outputPath)
                else:
                    outputPath = wellPath
                if cfg.getValue(ELMConfig.debugOutput):
                    WindowManager.setTempCurrentImage(currIP)
                    IJ.saveAs('png', os.path.join(outputPath, "Orig_" + dbgOutDesc + ".png"))
                # We need to get to a grayscale image, which will be done
                # differently for different channels
                startTime = time.time()
                currIP = ELMImageUtils.getThresholdedMask(currIP, c, z, t, chanName, cfg, outputPath, dbgOutDesc)
                endTime = time.time()
                if not 'grayscale' in times:
                    times['grayscale'] = []
                times['grayscale'].append(endTime-startTime)
                # Thresholding can fail/return nothing; record empty stats and move on.
                if (not currIP):
                    resultsImage.close()
                    stats[c][z][t][ELMConfig.UM_AREA] = []
                    continue
                startTime = time.time()
                # Create a table to store the results
                table = ResultsTable()
                # Create a hidden ROI manager, to store a ROI for each blob or cell
                #roim = RoiManager(True)
                # Create a ParticleAnalyzer
                measurements = Measurements.AREA + Measurements.MEAN + Measurements.STD_DEV + Measurements.MIN_MAX + Measurements.CENTROID + Measurements.RECT + Measurements.ELLIPSE
                paFlags = ParticleAnalyzer.IN_SITU_SHOW | ParticleAnalyzer.SHOW_MASKS | ParticleAnalyzer.CLEAR_WORKSHEET
                pa = ParticleAnalyzer(paFlags, measurements, table, minSize, Double.POSITIVE_INFINITY, minCircularity, 1.0)
                #pa.setHideOutputImage(True)
                # The Result image is copied when CurrIP can still have calibration from loading
                # We want the output to be in terms of pixels, for ease of use, so adjust calibration
                resultsImage.setCalibration(currIP.getCalibration())
                # Intensity measurements are redirected to the original image,
                # while segmentation runs on the mask.
                Analyzer.setRedirectImage(resultsImage)
                if not pa.analyze(currIP):
                    print "There was a problem in analyzing", currIP
                endTime = time.time()
                if not 'pa' in times:
                    times['pa'] = []
                times['pa'].append(endTime-startTime)
                #for i in range(0, roim.getCount()) :
                #    r = roim.getRoi(i);
                #    r.setColor(Color.red)
                #    r.setStrokeWidth(2)
                # The measured areas are listed in the first column of the
                # results table, as a float array:
                newAreas = []
                maxArea = 0
                # Convert pixel areas to um^2 and track the largest blob.
                if table.getColumn(ResultsTable.AREA):
                    for pixArea in table.getColumn(ResultsTable.AREA):
                        a = pixArea * cfg.getValue(ELMConfig.pixelHeight) * cfg.getValue(ELMConfig.pixelWidth)
                        newAreas.append(a)
                        if (a > maxArea):
                            maxArea = a
                # Threshold areas: collect indices of blobs that fail either
                # the relative (% of largest) or absolute area threshold.
                idxToRemove = set()
                if cfg.hasValue(ELMConfig.areaMaxPercentThreshold):
                    areaPercentThresh = cfg.getValue(ELMConfig.areaMaxPercentThreshold)
                    for i in range(0,len(newAreas)):
                        if newAreas[i] < (areaPercentThresh * maxArea):
                            idxToRemove.add(i)
                if cfg.hasValue(ELMConfig.areaAbsoluteThreshold):
                    areaAbsoluteThresh = cfg.getValue(ELMConfig.areaAbsoluteThreshold)
                    for i in range(0,len(newAreas)):
                        if newAreas[i] < areaAbsoluteThresh:
                            idxToRemove.add(i)
                # Delete back-to-front so earlier indices stay valid.
                for i in sorted(idxToRemove, reverse=True):
                    del newAreas[i]
                stats[c][z][t][ELMConfig.UM_AREA] = newAreas
                # Geometry of the REMOVED blobs, kept so their pixels can be
                # erased from the mask below.
                centroidX = []
                centroidY = []
                roiX = []
                roiY = []
                roiWidth = []
                roiHeight = []
                rArea = []
                # Store all of the other data
                for col in range(0,table.getLastColumn()):
                    newData = table.getColumn(col)
                    if not newData is None:
                        if col == ResultsTable.X_CENTROID:
                            for idx in idxToRemove:
                                centroidX.append(newData[idx])
                        if col == ResultsTable.Y_CENTROID:
                            for idx in idxToRemove:
                                centroidY.append(newData[idx])
                        if col == ResultsTable.ROI_X:
                            for idx in idxToRemove:
                                roiX.append(int(newData[idx]))
                        if col == ResultsTable.ROI_Y:
                            for idx in idxToRemove:
                                roiY.append(int(newData[idx]))
                        if col == ResultsTable.ROI_WIDTH:
                            for idx in idxToRemove:
                                roiWidth.append(int(newData[idx]))
                        if col == ResultsTable.ROI_HEIGHT:
                            for idx in idxToRemove:
                                roiHeight.append(int(newData[idx]))
                        if col == ResultsTable.AREA:
                            for idx in idxToRemove:
                                rArea.append(newData[idx])
                        for i in sorted(idxToRemove, reverse=True):
                            del newData[i]
                    stats[c][z][t][table.getColumnHeading(col)] = newData
                IJ.saveAs('png', os.path.join(outputPath, "PreFiltered_Segmentation_" + dbgOutDesc + "_particles.png"))
                # Remove the segmentation masks for the objects removed
                currProcessor = currIP.getProcessor()
                ff = FloodFiller(currProcessor)
                currIP.getProcessor().setValue(0)
                calib = resultsImage.getCalibration()
                # Smallest areas first, so small blobs are erased before any
                # larger overlapping region.
                sortedAreaIndices = [i[0] for i in sorted(enumerate(rArea), key=lambda x:x[1])]
                for idx in range(0, len(sortedAreaIndices)):
                    i = sortedAreaIndices[idx]
                    centX = int(calib.getRawX(centroidX[i]))
                    centY = int(calib.getRawY(centroidY[i]))
                    # Since the centroid isn't guaranteed to be part of the blob
                    # search around until an active pixel is found
                    found = False
                    halfWidth = min([roiHeight[i], roiWidth[i]])
                    for offset in range(0,halfWidth):
                        if found:
                            break
                        for x in range(centX-offset,centX+offset+1):
                            if found:
                                break
                            for y in range(centY-offset,centY+offset+1):
                                if not currProcessor.getPixel(x,y) == 0x0:
                                    found = True
                                    finalX = x
                                    finalY = y
                                    break
                    if not found:
                        print "\t\tZ = " + str(z) + ", T = " + str(t) + ", chan " + chanName + ": ERROR: Never found active pixel for filtered blob, centroid: " + str(centX) + ", " + str(centY)
                    else:
                        # Clip the fill to the blob's bounding box, then
                        # flood-fill it to 0 (erase).
                        currProcessor.setRoi(roiX[i], roiY[i], roiWidth[i], roiHeight[i])
                        ff.fill8(finalX,finalY)
                #IJ.saveAs('png', os.path.join(outputPath, "Segmentation_" + dbgOutDesc + "_" + str(idx) + ".png"))
                #outImg = pa.getOutputImage()
                IJ.saveAs('png', os.path.join(outputPath, "Segmentation_" + dbgOutDesc + "_particles.png"))
                if cfg.hasValue(ELMConfig.createSegMask) and cfg.getValue(ELMConfig.createSegMask) == True:
                    # Create segmentation mask
                    segMask = currIP.duplicate()
                    segMask.setTitle("SegMask_" + dbgOutDesc)
                    # Iterate by smallest area first
                    # We are more likely to correctly label small areas
                    if len(newAreas) > 0:
                        segProcessor = segMask.getProcessor()
                        # More than 255 labels won't fit in 8 bits.
                        if (len(newAreas) > 255):
                            segProcessor = segProcessor.convertToShort(True)
                            segMask.setProcessor(segProcessor)
                        ff = FloodFiller(segProcessor)
                        sortedAreaIndices = [i[0] for i in sorted(enumerate(stats[c][z][t]['Area']), key=lambda x:x[1])]
                        for idx in range(0, len(sortedAreaIndices)):
                            row = sortedAreaIndices[idx]
                            centX = int(stats[c][z][t]['X'][row])
                            centY = int(stats[c][z][t]['Y'][row])
                            roiX = int(stats[c][z][t]['BX'][row])
                            roiY = int(stats[c][z][t]['BY'][row])
                            roiWidth = int(stats[c][z][t]['Width'][row])
                            roiHeight = int(stats[c][z][t]['Height'][row])
                            area = stats[c][z][t]['Area'][row]
                            halfRoiHeight = roiHeight/2 + 1
                            halfRoiWidth = roiWidth/2 + 1
                            # Since the centroid isn't guaranteed to be part of the blob
                            # search around until an active pixel is found
                            found = False
                            for xOffset in range(0,halfRoiWidth):
                                if found:
                                    break
                                for yOffset in range(0, halfRoiHeight):
                                    if found:
                                        break
                                    for x in range(centX-xOffset,centX+xOffset+1):
                                        if found:
                                            break
                                        for y in range(centY-yOffset,centY+yOffset+1):
                                            # original image and this image for masked pixel
                                            # By checking original image, we avoid confusion with a label of 255
                                            if segProcessor.getPixel(x,y) == 255 and currProcessor.getPixel(x,y) == 255:
                                                found = True
                                                finalX = x
                                                finalY = y
                                                break
                            if not found:
                                print "\t\tZ = " + str(z) + ", T = " + str(t) + ", chan " + chanName + ": ERROR: Never found active pixel for seg mask, centroid, roi, area (px): " \
                                    + str(centX) + ", " + str(centY) + ", " + str(roiX) + ", " + str(roiY) + ", " + str(roiWidth) + ", " + str(roiHeight) + ", " + str(area)
                            else:
                                # Label this blob with (row index + 1).
                                segProcessor.setRoi(roiX, roiY, roiWidth, roiHeight)
                                segProcessor.setColor(row + 1)
                                ff.fill8(finalX,finalY)
                        lut = LutLoader.openLut(cfg.getValue(ELMConfig.lutPath))
                        segMask.setLut(lut)
                        WindowManager.setTempCurrentImage(segMask);
                        IJ.saveAs('png', os.path.join(outputPath, "SegMask_" + dbgOutDesc + "_particles.png"))
                # Paint the surviving mask pixels onto the original image in a
                # per-channel colour to make an overlay image.
                startTime = time.time()
                width = currIP.getWidth();
                height = currIP.getHeight();
                overlayImage = resultsImage.duplicate()
                overlayImage.setTitle("Overlay_" + dbgOutDesc + "_particles")
                if not overlayImage.getType() == ImagePlus.COLOR_RGB:
                    imgConvert = ImageConverter(overlayImage)
                    imgConvert.convertToRGB()
                overlayProcessor = overlayImage.getProcessor()
                currProcessor = currIP.getProcessor()
                # Overlay colours are 0x00RRGGBB packed ints.
                if (cfg.getValue(ELMConfig.chanLabel)[c] == ELMConfig.BRIGHTFIELD):
                    maskColor = 0x0000ff00
                elif (cfg.getValue(ELMConfig.chanLabel)[c] == ELMConfig.YELLOW):
                    maskColor = 0x000000ff
                elif (cfg.getValue(ELMConfig.chanLabel)[c] == ELMConfig.RED):
                    maskColor = 0x0000ff00
                elif (cfg.getValue(ELMConfig.chanLabel)[c] == ELMConfig.GREEN):
                    maskColor = 0x00ff0000
                elif (cfg.getValue(ELMConfig.chanLabel)[c] == ELMConfig.BLUE):
                    maskColor = 0x00ffff00
                for x in range(0, width):
                    for y in range(0,height):
                        currPix = currProcessor.getPixel(x,y);
                        if not currPix == 0x00000000:
                            overlayProcessor.putPixel(x, y, maskColor)
                endTime = time.time()
                if not 'overlay' in times:
                    times['overlay'] = []
                times['overlay'].append(endTime-startTime)
                WindowManager.setTempCurrentImage(overlayImage);
                IJ.saveAs('png', os.path.join(outputPath, "Overlay_" + dbgOutDesc + "_particles.png"))
                #currIP.hide()
                currIP.close()
                resultsImage.close()
    # Report average time spent per stage.
    timesAvg = {}
    for key in times:
        timeList = times[key]
        timesAvg[key] = sum(timeList) / len(timeList);
    print("processImage times " + str(timesAvg))
    return stats
print invertedDict if imageOrTable == "Results table": rt = ResultsTable.getResultsTable(resultsName).clone() else: measureImp=WM.getImage(imageName) src2=clij2.push(measureImp) rt = ResultsTable() clij2.statisticsOfBackgroundAndLabelledPixels(src2, test.src,rt) src2.close() resultsName="Results table" try: labels = rt.getColumn(rt.getColumnIndex('TrackID')) frame = rt.getColumn(rt.getColumnIndex('Frame (Time)')) except: try: labels = rt.getColumn(rt.getColumnIndex('Label')) except: labels = rt.getColumn(rt.getColumnIndex('IDENTIFIER')) for i in range(len(labels)): try: rt.setValue("Label name", i,listOfNames[int(labels[i])]) rt.setValue("Label value", i,invertedDict[int(labels[i])]) except: print i
index = -1 maxArea = -1 if thresholdMode: imp.show() #WaitForUserDialog("Title", "I want to see the ROI").show() # Check if Column even exists (in case it didn't measure anything) if table.getColumnIndex("Area") != -1: # Find the ROI with the largest area for i, area in enumerate(table.getColumn(table.getColumnIndex("Area"))): if area > maxArea: index = i # Writes everything in the output file if index != -1: diameter = 2* math.sqrt( float(table.getValue("Area", index)) / (2* math.pi)) isOrganoid = table.getValue("Area", index) > area_threshold and table.getValue("Area", index) > round_threshold output.write(str(subfolder) + ',' + filename + ',' + str(table.getValue("Feret", index)) + ',' + str(table.getValue("MinFeret", index)) + ',' + str((table.getValue("MinFeret", index)+table.getValue("Feret", index))/2) + ',' + str(table.getValue("Area", index)) + ',' + str(diameter) + ',' + str(table.getValue("Major", index)) + ','+ str(table.getValue("Minor", index)) + ','+ str(table.getValue("Circ.", index)) + ',' +str(table.getValue("Round", index)) + ',' + str(table.getValue("Solidity", index)) + ',' + str(isOrganoid)) else: output.write(str(subfolder) + ',' + filename + ",NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA") output.write('\n')
roim = RoiManager(True) # Create a ParticleAnalyzer, with arguments: # 1. options (could be SHOW_ROI_MASKS, SHOW_OUTLINES, SHOW_MASKS, SHOW_NONE, ADD_TO_MANAGER, and others; combined with bitwise-or) # 2. measurement options (see [http://imagej.net/developer/api/ij/measure/Measurements.html Measurements]) # 3. a ResultsTable to store the measurements # 4. The minimum size of a particle to consider for measurement # 5. The maximum size (idem) # 6. The minimum circularity of a particle # 7. The maximum circularity pa = ParticleAnalyzer(ParticleAnalyzer.ADD_TO_MANAGER, Measurements.AREA, table, 0, Double.POSITIVE_INFINITY, 0.0, 1.0) pa.setHideOutputImage(True) if pa.analyze(imp): print "All ok" else: print "There was a problem in analyzing", blobs # The measured areas are listed in the first column of the results table, as a float array: areas = table.getColumn(0) # Create a new list to store the mean intensity values of each blob: means = [] for roi in RoiManager.getInstance().getRoisAsArray(): blobs.setRoi(roi) stats = blobs.getStatistics(Measurements.MEAN) means.append(stats.mean) for area, mean in zip(areas, means): print area, mean
def merge_incorrect_splits_and_get_centroids(imp, centroid_distance_limit=100, size_limit=100):
    """Merge over-split small particles and return an image marking centroids.

    If particles are found with centroids closer than centroid_distance_limit
    and both have size < size_limit, their average centroid is used instead.
    Two ParticleAnalyzer passes are run on `imp`: one over small particles
    (area in [0, size_limit]) whose nearby centroids get averaged together,
    and one over large particles (area in [size_limit, image area]) whose
    centroids are kept as-is.

    Parameters
    ----------
    imp : ImagePlus               binary/thresholded input image
    centroid_distance_limit : int max distance (calibrated units) for merging
    size_limit : int              area cut-off between the two passes

    Returns
    -------
    out_imp : ImagePlus — new 8-bit image, background 0, with a 10x10 oval at
    each centroid filled with value idx+1 (i.e. a label per nucleus).

    Side effects: shows images, pops MyWaitForUser dialogs, prints progress.
    """
    imp.killRoi()
    rt = ResultsTable()
    # Blank 8-bit canvas, same size as the input, for drawing centroids.
    out_imp = IJ.createImage("Nuclei centroids from {}".format(imp.getTitle()),
                             imp.getWidth(), imp.getHeight(), 1, 8)
    out_imp.show()
    IJ.run(out_imp, "Select All", "")
    IJ.run(out_imp, "Set...", "value=0 slice")
    out_imp.show()
    cal = imp.getCalibration()
    # Maximum possible particle area, in calibrated units.
    mxsz = imp.width * cal.pixelWidth * imp.height * cal.pixelHeight
    print("mxsz = {}".format(mxsz))
    roim = RoiManager()
    imp.show()
    # Pass 1: small particles only (area 0..size_limit).
    pa = ParticleAnalyzer(
        ParticleAnalyzer.ADD_TO_MANAGER,
        ParticleAnalyzer.AREA | ParticleAnalyzer.SLICE | ParticleAnalyzer.CENTROID,
        rt, 0, size_limit)
    pa.setRoiManager(roim)
    roim.reset()
    rt.reset()
    pa.analyze(imp)
    MyWaitForUser("paise", "pause post-merge incorrect splits particel analysis")
    # NOTE(review): no columnExists() guard here (unlike pass 2) — this will
    # fail if pass 1 finds no particles. Confirm inputs always have some.
    rt_xs = rt.getColumn(rt.getColumnIndex("X")).tolist()
    rt_ys = rt.getColumn(rt.getColumnIndex("Y")).tolist()
    centroids = [(x, y) for x, y in zip(rt_xs, rt_ys)]
    print("centroids = {}".format(centroids))
    centroids_set = set()
    for c in centroids:
        # Distance from this centroid to every centroid (including itself).
        ds = [
            math.sqrt((c[0] - cx)**2 + (c[1] - cy)**2)
            for (cx, cy) in centroids
        ]
        close_mask = [d < centroid_distance_limit for d in ds]
        # if no other centroids are within centroid_distance_limit, add this
        # centroid to the output set
        # otherwise, add the average position of this centroid and those
        # within centroid_distance_limit to the output set
        # (booleans act as 0/1 weights; sum(close_mask) >= 1 since d(c,c)=0,
        # and the set de-duplicates identical averaged positions)
        centroids_set.add(
            (sum([msk * b[0] for msk, b in zip(close_mask, centroids)]) /
             sum(close_mask),
             sum([msk * b[1] for msk, b in zip(close_mask, centroids)]) /
             sum(close_mask)))
    roim.reset()
    rt.reset()
    # Pass 2: large particles (area size_limit..mxsz) are kept unmerged.
    pa = ParticleAnalyzer(
        ParticleAnalyzer.ADD_TO_MANAGER,
        ParticleAnalyzer.AREA | ParticleAnalyzer.SLICE | ParticleAnalyzer.CENTROID,
        rt, size_limit, mxsz)
    pa.setRoiManager(roim)
    pa.analyze(imp)
    MyWaitForUser("paise", "pause post-merge incorrect splits particel analysis 2")
    if rt.columnExists("X"):
        rt_xs = rt.getColumn(rt.getColumnIndex("X")).tolist()
        rt_ys = rt.getColumn(rt.getColumnIndex("Y")).tolist()
        centroids = [(x, y) for x, y in zip(rt_xs, rt_ys)]
        for c in centroids:
            centroids_set.add(c)
    centroids = list(centroids_set)
    cal = imp.getCalibration()
    # Convert calibrated coordinates back to pixel coordinates for drawing.
    centroids = [(c[0] / cal.pixelWidth, c[1] / cal.pixelHeight)
                 for c in centroids]
    print("new number of nuclei identified = {}".format(len(centroids)))
    roim.reset()
    roim.close()
    # Stamp each centroid as a 10x10 oval labelled with idx+1.
    for idx, c in enumerate(centroids):
        roi = OvalRoi(c[0], c[1], 10, 10)
        out_imp.setRoi(roi)
        IJ.run(out_imp, "Set...", "value={} slice".format(idx + 1))
    imp.changes = False
    #imp.close();
    return out_imp
keep_rois = []; pa.analyze(imp); IJ.run("Set Measurements...", "centroid redirect=None decimal=3"); frames = imp.getNFrames(); for fridx in range(0, frames): rt.reset(); imp.setSliceWithoutUpdate(fridx + 1); ip = imp.getProcessor(); if not pa.analyze(imp, ip): raise Exception("something went wrong analysing particles!") rt.show("centroids"); rm = RoiManager.getInstance(); if rm.getCount() > 0: rois = rm.getRoisAsArray(); centroidsx = rt.getColumn(rt.getColumnIndex('X')); centroidsy = rt.getColumn(rt.getColumnIndex('Y')); print(centroidsx); print(centroidsy); gd = GenericDialog("Continue?"); gd.showDialog(); if gd.wasCanceled(): raise Exception("Run interupted"); for roi in rois: imp.setRoi(roi); stats = ImageStatistics().getStatistics(ip); print(stats.xCenterOfMass) #print(keep_rois)
def fretCalculations(imp1, nFrame, donorChannel, acceptorChannel, acceptorChannel2, table, gfx1, gfx2, gfx3, gfx4, gfx5, originalTitle):
    """Compute per-label FRET emission ratios for one timepoint on the GPU.

    Extracts the donor and acceptor channels for frame `nFrame`, masks out
    saturated pixels (>= global `maxIntensity`) on the GPU via CLIJ2, sums
    donor and acceptor-FRET intensity per label in the label map `gfx1`, and
    appends one row per non-zero label to `table`. Also builds a label image
    recoloured by 1000*acceptor/donor ratio and its Z max-projection.

    Parameters
    ----------
    imp1 : ImagePlus          multi-channel time series
    nFrame : int              frame (timepoint) to process
    donorChannel, acceptorChannel, acceptorChannel2 : int  channel indices
    table : ResultsTable      accumulated results; rows are appended
    gfx1 : CLIJ2 buffer       label map used for per-label statistics
    gfx2..gfx5 : CLIJ2 buffers reused as GPU scratch space (contents clobbered)
    originalTitle : str       file name written into each row / window title

    Returns
    -------
    (table, FRETimp2, FRETProjImp, labelImp)
        table      — same ResultsTable, with this frame's rows appended
        FRETimp2   — ratio-valued label stack pulled from the GPU
        FRETProjImp— its maximum Z projection
        labelImp   — the label map pulled back from the GPU

    Uses globals: clij2, maxIntensity, cal (spatial calibration), and helper
    extractChannel. Calls clij2.clear(), so ALL GPU buffers (including the
    ones passed in) are released on return.
    """
    donorImp = extractChannel(imp1, donorChannel, nFrame)
    acceptorImp = extractChannel(imp1, acceptorChannel, nFrame)
    acceptorImp2 = extractChannel(imp1, acceptorChannel2, nFrame)
    #push donor and acceptor channels to gpu and threshold them both to remove saturated pixels
    gfx4 = clij2.push(donorImp)
    gfx5 = clij2.push(acceptorImp)
    gfx6 = clij2.create(gfx5)
    # Build a mask of pixels unsaturated in BOTH channels, then zero the
    # saturated pixels of each channel by multiplying with that mask.
    clij2.threshold(gfx4, gfx2, maxIntensity)
    clij2.binarySubtract(gfx3, gfx2, gfx6)
    clij2.threshold(gfx5, gfx2, maxIntensity)
    clij2.binarySubtract(gfx6, gfx2, gfx3)
    clij2.threshold(gfx3, gfx6, 0.5)
    clij2.multiplyImages(gfx6, gfx4, gfx2)
    clij2.multiplyImages(gfx6, gfx5, gfx4)
    gfx6 = clij2.push(acceptorImp2)
    #donor is gfx2, acceptor FRET is gfx4, segment channel (acceptor normal) is gfx6
    results = ResultsTable()
    clij2.statisticsOfBackgroundAndLabelledPixels(gfx2, gfx1, results)
    # Column 13 of CLIJ2 label statistics is used as the per-label summed
    # intensity here; row index == label id (row 0 = background).
    donorChIntensity = results.getColumn(13)
    results2 = ResultsTable()
    clij2.statisticsOfBackgroundAndLabelledPixels(gfx4, gfx1, results2)
    acceptorChIntensity = results2.getColumn(13)
    results3 = ResultsTable()
    clij2.statisticsOfBackgroundAndLabelledPixels(gfx6, gfx1, results3)
    #calculate the fret ratios, removing any ROI with intensity of zero
    FRET = []
    for i in xrange(len(acceptorChIntensity)):
        if (acceptorChIntensity[i] > 0) and (donorChIntensity[i] > 0):
            #don't write in the zeros to the results
            FRET.append((1000 * acceptorChIntensity[i] / donorChIntensity[i]))
            table.incrementCounter()
            table.addValue("Frame (Time)", nFrame)
            table.addValue("Label", i)
            table.addValue("Emission ratio",
                           acceptorChIntensity[i] / donorChIntensity[i])
            table.addValue("Mean donor emission",
                           results.getValue("MEAN_INTENSITY", i))
            table.addValue("Mean acceptor emission (FRET)",
                           results2.getValue("MEAN_INTENSITY", i))
            table.addValue("Mean acceptor emission",
                           results3.getValue("MEAN_INTENSITY", i))
            table.addValue("Sum donor emission", donorChIntensity[i])
            table.addValue("Sum acceptor emission (FRET)",
                           acceptorChIntensity[i])
            table.addValue("Sum acceptor emission",
                           results3.getValue("SUM_INTENSITY", i))
            # Volume in calibrated units from the voxel count.
            table.addValue(
                "Volume", cal.pixelWidth * cal.pixelHeight * cal.pixelDepth *
                results.getValue("PIXEL_COUNT", i))
            table.addValue("Pixel count", results.getValue("PIXEL_COUNT", i))
            table.addValue("x", cal.pixelWidth * results.getValue("CENTROID_X", i))
            table.addValue("y", cal.pixelHeight * results.getValue("CENTROID_Y", i))
            table.addValue("z", cal.pixelDepth * results.getValue("CENTROID_Z", i))
            table.addValue("File name", originalTitle)
        else:
            #must write in the zeros as this array is used to generate the map of emission ratios
            FRET.append(0)
    table.show("Results of " + originalTitle)
    # Index 0 is the background label — force its ratio to 0.
    FRET[0] = 0
    # Pack the per-label ratios into a 1xN float image so
    # replaceIntensities can map label id -> ratio on the GPU.
    FRETarray = array("f", FRET)
    fp = FloatProcessor(len(FRET), 1, FRETarray, None)
    FRETImp = ImagePlus("FRETImp", fp)
    gfx4 = clij2.push(FRETImp)
    clij2.replaceIntensities(gfx1, gfx4, gfx5)
    maxProj = clij2.create(gfx5.getWidth(), gfx5.getHeight(), 1)
    clij2.maximumZProjection(gfx5, maxProj)
    #pull the images
    FRETimp2 = clij2.pull(gfx5)
    FRETProjImp = clij2.pull(maxProj)
    labelImp = clij2.pull(gfx1)
    clij2.clear()
    donorImp.close()
    acceptorImp.close()
    acceptorImp2.close()
    return table, FRETimp2, FRETProjImp, labelImp
def process(subFolder, outputDirectory, filename):
    """Segment the organoid in one image set, mask every channel with it, count
    nuclei and marker-positive nuclei, and append one summary row to the csv.

    subFolder       -- sub-directory (relative to global inputDirectory) with
                       the channel images of this acquisition
    outputDirectory -- directory receiving "<outputName>.csv"
    filename        -- name of the ch00 image of the set

    Relies on module-level configuration: inputDirectory, channels,
    displayImages, blur, lowerBounds, tooBigThreshold, tooSmallThreshold,
    areaFractionThreshold, outputName, and the open `log` file handle.

    NOTE(review): the original source had collapsed whitespace; the block
    structure here (in particular the extent of the per-ROI mask loop) was
    reconstructed by analogy with the duplicate implementation of this
    routine later in the file -- confirm against version control.
    """
    # IJ.close()
    imp = IJ.openImage(inputDirectory + subFolder + '/' +
                       rreplace(filename, "_ch00.tif", ".tif"))
    imp.show()

    # --- pixel size from the Leica XML metadata ---------------------------
    # BUG FIX: the original used for/else here, and a for-else suite runs
    # whenever the loop finishes without break -- so the fallback constant
    # always overwrote the parsed value. Default first, then parse.
    pixel_length = 0.8777017
    for file in os.listdir(inputDirectory + subFolder):
        if file.endswith('.xml'):
            xml = os.path.join(inputDirectory + subFolder, file)
            # NOTE(review): debugging override left in by the author -- every
            # run parses this fixed file; confirm before removing.
            xml = "C:/Users/Harris/Desktop/test_xml_for_parsing_pixel.xml"
            element_tree = ET.parse(xml)
            root = element_tree.getroot()
            for dimensions in root.iter('DimensionDescription'):
                num_pixels = int(dimensions.attrib['NumberOfElements'])
                if dimensions.attrib['Unit'] == "m":
                    # metres -> micrometres
                    length = float(dimensions.attrib['Length']) * 1000000
                else:
                    length = float(dimensions.attrib['Length'])
                pixel_length = length / num_pixels

    IJ.run(
        imp, "Properties...",
        "channels=1 slices=1 frames=1 unit=um pixel_width=" +
        str(pixel_length) + " pixel_height=" + str(pixel_length) +
        " voxel_depth=25400.0508001")
    ic = ImageConverter(imp)
    ic.convertToGray8()
    # IJ.setThreshold(imp, 2, 255)

    # Automatically selects the area of the organoid based on automated
    # thresholding and creates a mask to be applied on all other images.
    IJ.setAutoThreshold(imp, "Mean dark no-reset")
    IJ.run(imp, "Convert to Mask", "")
    IJ.run(imp, "Analyze Particles...", "size=100000-Infinity add select")
    rm = RoiManager.getInstance()
    num_roi = rm.getCount()

    # BUG FIX: the original did "for i in num_roi", iterating the int itself
    # (TypeError); iterate over the ROI indices instead.
    for i in range(num_roi):
        # BUG FIX: getCurrentImage() is undefined in this script; use the
        # active image via IJ.getImage() -- TODO confirm intent.
        imp = IJ.getImage()
        rm.select(imp, i)
        IJ.setBackgroundColor(0, 0, 0)
        IJ.run(imp, "Clear Outside", "")
        IJ.run(imp, "Convert to Mask", "")
        IJ.run(imp, "Remove Outliers...", "radius=5" + " threshold=50" + " which=Dark")
        IJ.run(imp, "Remove Outliers...", "radius=5" + " threshold=50" + " which=Bright")
        # Save the mask and open it.
        # BUG FIX: the int index must be str()-converted before path
        # concatenation (TypeError in the original).
        IJ.saveAs("tiff", inputDirectory + '/mask' + str(i))
        mask = IJ.openImage(inputDirectory + '/mask' + str(i) + '.tif')

    if not displayImages:
        imp.changes = False
        imp.close()

    # Per-channel result slots (index == channel number).
    images = [None] * 5
    intensities = [None] * 5
    blobsarea = [None] * 5
    blobsnuclei = [None] * 5
    bigAreas = [None] * 5

    imp.close()

    # --- whole-organoid statistics per channel ----------------------------
    for chan in channels:
        v, x = chan
        images[x] = IJ.openImage(
            inputDirectory + subFolder + '/' +
            rreplace(filename, "_ch00.tif", "_ch0" + str(x) + ".tif"))
        # Apply the organoid mask. NOTE(review): only the mask of the LAST
        # ROI survives the loop above -- confirm multi-organoid images are
        # not expected here.
        apply_mask = ImageCalculator()
        images[x] = apply_mask.run("Multiply create 32 bit", mask, images[x])
        ic = ImageConverter(images[x])
        ic.convertToGray8()
        imp = images[x]
        # Mean intensity and area over every organoid ROI; each iteration
        # overwrites the slot, so the last ROI wins.
        for roi in rm.getRoisAsArray():
            imp.setRoi(roi)
            stats_i = imp.getStatistics(Measurements.MEAN | Measurements.AREA)
            intensities[x] = stats_i.mean
            bigAreas[x] = stats_i.area

    rm.close()

    # --- nucleus segmentation on the ch00 image ---------------------------
    # Re-read the pixel size (same for/else fix as above).
    pixel_length = 0.8777017
    # BUG FIX: the original listed os.listdir(subFolder) here although the
    # path is relative to inputDirectory, unlike the identical loop above.
    for file in os.listdir(inputDirectory + subFolder):
        if file.endswith('.xml'):
            xml = os.path.join(inputDirectory + subFolder, file)
            xml = "C:/Users/Harris/Desktop/test_xml_for_parsing_pixel.xml"
            element_tree = ET.parse(xml)
            root = element_tree.getroot()
            for dimensions in root.iter('DimensionDescription'):
                num_pixels = int(dimensions.attrib['NumberOfElements'])
                if dimensions.attrib['Unit'] == "m":
                    length = float(dimensions.attrib['Length']) * 1000000
                else:
                    length = float(dimensions.attrib['Length'])
                pixel_length = length / num_pixels

    imp = IJ.openImage(inputDirectory + subFolder + '/' + filename)
    imp = apply_mask.run("Multiply create 32 bit", mask, imp)
    # BUG FIX: the original option string lacked the separating spaces before
    # "pixel_height" and "voxel_depth", producing a malformed option string.
    IJ.run(
        imp, "Properties...",
        "channels=1 slices=1 frames=1 unit=um pixel_width=" +
        str(pixel_length) + " pixel_height=" + str(pixel_length) +
        " voxel_depth=25400.0508001")

    # Sets the threshold and watersheds. For details see
    # https://imagej.nih.gov/ij/developer/api/ij/process/ImageProcessor.html
    ic = ImageConverter(imp)
    ic.convertToGray8()
    IJ.run(imp, "Remove Outliers...", "radius=2" + " threshold=50" + " which=Dark")
    IJ.run(imp, "Gaussian Blur...", "sigma=" + str(blur))
    IJ.setThreshold(imp, lowerBounds[0], 255)
    if displayImages:
        imp.show()
    IJ.run(imp, "Convert to Mask", "")
    IJ.run(imp, "Watershed", "")
    if not displayImages:
        imp.changes = False
        imp.close()

    # Counts and measures the area of particles (nuclei), adding them to a
    # results table and to the ROI manager.
    table = ResultsTable()
    roim = RoiManager(True)
    ParticleAnalyzer.setRoiManager(roim)
    pa = ParticleAnalyzer(ParticleAnalyzer.ADD_TO_MANAGER, Measurements.AREA,
                          table, 15, 9999999999999999, 0.2, 1.0)
    pa.setHideOutputImage(True)
    # imp = impM
    # imp.getProcessor().invert()
    pa.analyze(imp)
    areas = table.getColumn(0)

    # --- marker area fraction per nucleus, per channel --------------------
    areaFractionsArray = [None] * 5
    for chan in channels:
        v, x = chan
        # Opens each image and thresholds it.
        imp = images[x]
        IJ.run(
            imp, "Properties...",
            "channels=1 slices=1 frames=1 unit=um pixel_width=0.8777017 pixel_height=0.8777017 voxel_depth=25400.0508001"
        )
        ic = ImageConverter(imp)
        ic.convertToGray8()
        IJ.setThreshold(imp, lowerBounds[x], 255)
        if displayImages:
            imp.show()
            WaitForUserDialog("Title", "Adjust Threshold for Marker " + v).show()
        IJ.run(imp, "Convert to Mask", "")
        # Area fraction of this marker inside every nucleus ROI.
        areaFractions = []
        for roi in roim.getRoisAsArray():
            imp.setRoi(roi)
            stats = imp.getStatistics(Measurements.AREA_FRACTION)
            areaFractions.append(stats.areaFraction)
        areaFractionsArray[x] = areaFractions

    roim.close()

    # --- blob (marker particle) statistics per channel --------------------
    for chan in channels:
        v, x = chan
        imp = images[x]
        imp.deleteRoi()
        roim = RoiManager(True)
        ParticleAnalyzer.setRoiManager(roim)
        pa = ParticleAnalyzer(ParticleAnalyzer.ADD_TO_MANAGER, Measurements.AREA,
                              table, 15, 9999999999999999, 0.2, 1.0)
        pa.analyze(imp)
        blobs = []
        for roi in roim.getRoisAsArray():
            imp.setRoi(roi)
            stats = imp.getStatistics(Measurements.AREA)
            blobs.append(stats.area)
        # TODO (original author): use the initial mask tissue area instead.
        blobsarea[x] = sum(blobs)
        blobsnuclei[x] = len(blobs)
        if not displayImages:
            imp.changes = False
            imp.close()
        roim.reset()
        roim.close()

    imp.close()

    # --- summary row (one row per image in the output csv) ----------------
    summary = {}
    summary['Image'] = filename
    summary['Directory'] = subFolder
    summary['size-average'] = 0
    summary['#nuclei'] = 0
    summary['all-negative'] = 0
    summary['too-big-(>' + str(tooBigThreshold) + ')'] = 0
    summary['too-small-(<' + str(tooSmallThreshold) + ')'] = 0
    fieldnames = [
        'Name', 'Directory', 'Image', 'size-average',
        'too-big-(>' + str(tooBigThreshold) + ')',
        'too-small-(<' + str(tooSmallThreshold) + ')', '#nuclei', 'all-negative'
    ]
    # NOTE(review): x here is the loop variable left over from the channel
    # loop above, so this records the organoid area of the LAST channel only.
    summary["organoid-area"] = bigAreas[x]
    fieldnames.append("organoid-area")
    for chan in channels:
        v, x = chan
        summary[v + "-positive"] = 0
        fieldnames.append(v + "-positive")
        summary[v + "-intensity"] = intensities[x]
        fieldnames.append(v + "-intensity")
        summary[v + "-blobsarea"] = blobsarea[x]
        fieldnames.append(v + "-blobsarea")
        summary[v + "-blobsnuclei"] = blobsnuclei[x]
        fieldnames.append(v + "-blobsnuclei")
    # Colocalization column between first and second marker.
    if len(channels) > 2:
        summary[channels[1][0] + '-' + channels[2][0] + '-positive'] = 0
        fieldnames.append(channels[1][0] + '-' + channels[2][0] + '-positive')
    # Colocalization columns between all three markers.
    if len(channels) > 3:
        summary[channels[1][0] + '-' + channels[3][0] + '-positive'] = 0
        summary[channels[2][0] + '-' + channels[3][0] + '-positive'] = 0
        summary[channels[1][0] + '-' + channels[2][0] + '-' + channels[3][0] + '-positive'] = 0
        fieldnames.append(channels[1][0] + '-' + channels[3][0] + '-positive')
        fieldnames.append(channels[2][0] + '-' + channels[3][0] + '-positive')
        fieldnames.append(channels[1][0] + '-' + channels[2][0] + '-' + channels[3][0] + '-positive')

    # Classify every nucleus particle and tally marker positivity.
    areaCounter = 0
    for z, area in enumerate(areas):
        log.write(str(area))
        log.write("\n")
        if area > tooBigThreshold:
            summary['too-big-(>' + str(tooBigThreshold) + ')'] += 1
        elif area < tooSmallThreshold:
            summary['too-small-(<' + str(tooSmallThreshold) + ')'] += 1
        else:
            summary['#nuclei'] += 1
            areaCounter += area
            temp = 0
            for chan in channels:
                v, x = chan
                # BUG FIX: the original compared every channel against
                # areaFractionThreshold[0] (flagged by the author's own
                # comment); use the per-channel threshold, matching how
                # lowerBounds[x] is indexed.
                if areaFractionsArray[x][z] > areaFractionThreshold[x]:
                    summary[chan[0] + '-positive'] += 1
                    if x != 0:
                        temp += 1
            if temp == 0:
                summary['all-negative'] += 1
            if len(channels) > 2:
                if areaFractionsArray[1][z] > areaFractionThreshold[1]:
                    if areaFractionsArray[2][z] > areaFractionThreshold[2]:
                        summary[channels[1][0] + '-' + channels[2][0] + '-positive'] += 1
            if len(channels) > 3:
                if areaFractionsArray[1][z] > areaFractionThreshold[1]:
                    if areaFractionsArray[3][z] > areaFractionThreshold[3]:
                        summary[channels[1][0] + '-' + channels[3][0] + '-positive'] += 1
                if areaFractionsArray[2][z] > areaFractionThreshold[2]:
                    if areaFractionsArray[3][z] > areaFractionThreshold[3]:
                        summary[channels[2][0] + '-' + channels[3][0] + '-positive'] += 1
                        if areaFractionsArray[1][z] > areaFractionThreshold[1]:
                            summary[channels[1][0] + '-' + channels[2][0] + '-' +
                                    channels[3][0] + '-positive'] += 1

    # Average particle size (guard against division by zero).
    if float(summary['#nuclei']) > 0:
        summary['size-average'] = round(areaCounter / summary['#nuclei'], 2)

    # Append one line to the per-subfolder csv; write the header only when
    # the file is still empty.
    with open(outputDirectory + "/" + outputName + ".csv", 'a') as csvfile:
        writer = csv.DictWriter(csvfile, fieldnames=fieldnames,
                                extrasaction='ignore', lineterminator='\n')
        if os.path.getsize(outputDirectory + "/" + outputName + ".csv") < 1:
            writer.writeheader()
        writer.writerow(summary)

    IJ.run(imp, "Close All", "")
# Fragment of a larger per-color thresholding routine: the enclosing
# function/loop (which defines copy, happy, table, summary, color, i,
# maximum_size, minimum_size and "Tissue-area") is not visible in this chunk.
# Ask the user whether the previewed threshold is acceptable.
gd = GenericDialog("Set Threshold Mode")
gd.addChoice("Do you want to continue with this threshold?",
             ["No,choose again", "Yes, use this threshold."], "No")
gd.showDialog()
# Discard the preview copy regardless of the user's answer.
copy.changes = False
copy.close()
if gd.getNextChoice() == "Yes, use this threshold.":
    happy = True
    # adds count to summary
    if table.getColumnIndex("Area") != -1:
        summary[color[i] + "-ROI-count"] = len(table.getColumn(table.getColumnIndex("Area")))
        doubles = table.getColumnAsDoubles(table.getColumnIndex("Area"))
        summary[color[i] + "-Total-area"] = sum(doubles)
        # Keep only "cell-sized" particles (100..3000 area units).
        # NOTE: '&' is the bitwise operator; it works here because both
        # comparands are booleans, but 'and' would be conventional.
        arr = []
        for x, y in enumerate(doubles):
            if(y >= 100) & (y <= 3000):
                arr.append(y)
        summary[color[i] + "-Cell-Count"] = len(arr)
        summary[color[i] + "-Cell-Area"] = sum(arr)
        summary[color[i] + "-Max-ROI"] = maximum_size[i]
        summary[color[i] + "-Min-ROI"] = minimum_size[i]
        # NOTE(review): under Python 2 semantics these divisions truncate if
        # both operands are ints, and raise ZeroDivisionError when
        # summary["Tissue-area"] is 0 -- confirm upstream guarantees.
        summary[color[i] + "-ratio-cell count/tissue area"] = len(arr) / summary["Tissue-area"]
        summary[color[i] + "-ratio-particles/tissue area"] = len(table.getColumn(table.getColumnIndex("Area"))) / summary["Tissue-area"]
        summary[color[i] + "-ratio-totalareaofparticles/tissue area"] = sum(doubles) / summary["Tissue-area"]
# Fragment of a per-spot/per-track loop: the "continue" below implies an
# enclosing loop whose header is not visible in this chunk; id, x, y, t,
# mean, std, snr and rowNumber come from that outer scope.
# Write one spot row into the output table.
table.setValue("TRACK_ID", rowNumber, id)
table.setValue("POSITION_X", rowNumber, x)
table.setValue("POSITION_Y", rowNumber, y)
table.setValue("FRAME", rowNumber, t)
table.setValue("MEAN_INTENSITY", rowNumber, mean)
table.setValue("STANDARD_DEVIATION", rowNumber, std)
table.setValue("SNR", rowNumber, snr)
rowNumber = rowNumber + 1
# roi1 = PointRoi(x/dx, y/dy)
# roi1.setPosition(int(t))
# rm.add(imp, roi1, nextRoi)
# nextRoi = nextRoi+1
# Pull whole columns back out (3..6 = FRAME, MEAN, STD, SNR as set above).
frame = table.getColumn(3)
mean = table.getColumn(4)
std = table.getColumn(5)
snr = table.getColumn(6)
# Coefficient of variation per spot.
var = [s / m for s, m in zip(std, mean)]
from collections import Counter as Counter
# Frames that contain more than one spot -- presumably a cell division
# event splitting the track; TODO confirm interpretation.
idxvec = [item for item, count in Counter(frame).items() if count > 1]
if idxvec == []:
    continue
# Truncate all series at the first duplicated frame.
division = min(idxvec)
idx = frame.index(division) + 1
mean = mean[:idx]
frame = frame[:idx]
std = std[:idx]
def process(subFolder, outputDirectory, filename):
    """Segment the organoid boundary in one image set, measure every channel
    inside it, count nuclei and marker-positive nuclei, and append one summary
    row to "<outputName>.csv".

    subFolder       -- sub-directory (relative to global inputDirectory)
    outputDirectory -- directory receiving the csv
    filename        -- name of the ch00 image of the set

    Relies on module-level configuration: inputDirectory, channels,
    displayImages, blur, lowerBounds, tooBigThreshold, tooSmallThreshold,
    areaFractionThreshold, outputName, and the open `log` file handle.
    """
    imp = IJ.openImage(inputDirectory + subFolder + '/' +
                       rreplace(filename, "_ch00.tif", ".tif"))
    IJ.run(
        imp, "Properties...",
        "channels=1 slices=1 frames=1 unit=um pixel_width=0.8777017 pixel_height=0.8777017 voxel_depth=25400.0508001"
    )
    # --- organoid boundary ROI --------------------------------------------
    ic = ImageConverter(imp)
    ic.convertToGray8()
    IJ.setThreshold(imp, 2, 255)
    IJ.run(imp, "Convert to Mask", "")
    IJ.run(imp, "Remove Outliers...", "radius=5" + " threshold=50" + " which=Dark")
    IJ.run(imp, "Remove Outliers...", "radius=5" + " threshold=50" + " which=Bright")
    imp.getProcessor().invert()
    rm = RoiManager(True)  # hidden ROI manager
    # Select the zero-valued (background after invert) region and convert the
    # threshold into a single boundary ROI.
    imp.getProcessor().setThreshold(0, 0, ImageProcessor.NO_LUT_UPDATE)
    boundroi = ThresholdToSelection.run(imp)
    rm.addRoi(boundroi)
    if not displayImages:
        imp.changes = False
        imp.close()

    # Per-channel result slots (index == channel number).
    images = [None] * 5
    intensities = [None] * 5
    blobsarea = [None] * 5
    blobsnuclei = [None] * 5
    bigAreas = [None] * 5

    # --- whole-organoid statistics per channel ----------------------------
    for chan in channels:
        v, x = chan
        images[x] = IJ.openImage(
            inputDirectory + subFolder + '/' +
            rreplace(filename, "_ch00.tif", "_ch0" + str(x) + ".tif"))
        imp = images[x]
        # Mean intensity and area over the boundary ROI(s); each iteration
        # overwrites the slot, so the last ROI wins.
        for roi in rm.getRoisAsArray():
            imp.setRoi(roi)
            stats = imp.getStatistics(Measurements.MEAN | Measurements.AREA)
            intensities[x] = stats.mean
            bigAreas[x] = stats.area
    rm.close()

    # Opens the ch00 image and sets default properties
    imp = IJ.openImage(inputDirectory + subFolder + '/' + filename)
    IJ.run(
        imp, "Properties...",
        "channels=1 slices=1 frames=1 unit=um pixel_width=0.8777017 pixel_height=0.8777017 voxel_depth=25400.0508001"
    )
    # Sets the threshold and watersheds. For more details on image processing
    # see https://imagej.nih.gov/ij/developer/api/ij/process/ImageProcessor.html
    ic = ImageConverter(imp)
    ic.convertToGray8()
    IJ.run(imp, "Remove Outliers...", "radius=2" + " threshold=50" + " which=Dark")
    IJ.run(imp, "Gaussian Blur...", "sigma=" + str(blur))
    IJ.setThreshold(imp, lowerBounds[0], 255)
    if displayImages:
        imp.show()
    IJ.run(imp, "Convert to Mask", "")
    IJ.run(imp, "Watershed", "")
    if not displayImages:
        imp.changes = False
        imp.close()

    # Counts and measures the area of particles (nuclei), adding them to a
    # table called `table` and to the ROI manager.
    table = ResultsTable()
    roim = RoiManager(True)
    ParticleAnalyzer.setRoiManager(roim)
    pa = ParticleAnalyzer(ParticleAnalyzer.ADD_TO_MANAGER, Measurements.AREA,
                          table, 15, 9999999999999999, 0.2, 1.0)
    pa.setHideOutputImage(True)
    # imp = impM
    # imp.getProcessor().invert()
    pa.analyze(imp)
    areas = table.getColumn(0)

    # This loop goes through the remaining channels for the other markers,
    # saving all area fractions into the 2d array areaFractionsArray.
    areaFractionsArray = [None] * 5
    for chan in channels:
        v, x = chan
        # Opens each image and thresholds it.
        imp = images[x]
        IJ.run(
            imp, "Properties...",
            "channels=1 slices=1 frames=1 unit=um pixel_width=0.8777017 pixel_height=0.8777017 voxel_depth=25400.0508001"
        )
        ic = ImageConverter(imp)
        ic.convertToGray8()
        IJ.setThreshold(imp, lowerBounds[x], 255)
        if displayImages:
            imp.show()
            WaitForUserDialog("Title", "Adjust Threshold for Marker " + v).show()
        IJ.run(imp, "Convert to Mask", "")
        # Measures the area fraction of the new image for each ROI from the
        # ROI manager.
        areaFractions = []
        for roi in roim.getRoisAsArray():
            imp.setRoi(roi)
            stats = imp.getStatistics(Measurements.AREA_FRACTION)
            areaFractions.append(stats.areaFraction)
        # Saves the results in areaFractionsArray.
        areaFractionsArray[x] = areaFractions
    roim.close()

    # --- blob (marker particle) statistics per channel --------------------
    for chan in channels:
        v, x = chan
        imp = images[x]
        imp.deleteRoi()
        roim = RoiManager(True)
        ParticleAnalyzer.setRoiManager(roim)
        pa = ParticleAnalyzer(ParticleAnalyzer.ADD_TO_MANAGER, Measurements.AREA,
                              table, 15, 9999999999999999, 0.2, 1.0)
        pa.analyze(imp)
        blobs = []
        for roi in roim.getRoisAsArray():
            imp.setRoi(roi)
            stats = imp.getStatistics(Measurements.AREA)
            blobs.append(stats.area)
        blobsarea[x] = sum(blobs)
        blobsnuclei[x] = len(blobs)
        if not displayImages:
            imp.changes = False
            imp.close()
        roim.reset()
        roim.close()

    # Creates the summary dictionary which will correspond to a single row in
    # the output csv, with each key being a column.
    summary = {}
    summary['Image'] = filename
    summary['Directory'] = subFolder
    # Adds usual columns
    summary['size-average'] = 0
    summary['#nuclei'] = 0
    summary['all-negative'] = 0
    summary['too-big-(>' + str(tooBigThreshold) + ')'] = 0
    summary['too-small-(<' + str(tooSmallThreshold) + ')'] = 0
    # Creates the fieldnames variable needed to create the csv file at the end.
    fieldnames = [
        'Name', 'Directory', 'Image', 'size-average',
        'too-big-(>' + str(tooBigThreshold) + ')',
        'too-small-(<' + str(tooSmallThreshold) + ')', '#nuclei', 'all-negative'
    ]
    # Adds the columns for each individual marker (ignoring Dapi since it was
    # used to count nuclei).
    # NOTE(review): x is the loop variable left over from the channel loop, so
    # this records the organoid area of the LAST channel only -- confirm.
    summary["organoid-area"] = bigAreas[x]
    fieldnames.append("organoid-area")
    for chan in channels:
        v, x = chan
        summary[v + "-positive"] = 0
        fieldnames.append(v + "-positive")
        summary[v + "-intensity"] = intensities[x]
        fieldnames.append(v + "-intensity")
        summary[v + "-blobsarea"] = blobsarea[x]
        fieldnames.append(v + "-blobsarea")
        summary[v + "-blobsnuclei"] = blobsnuclei[x]
        fieldnames.append(v + "-blobsnuclei")
    # Adds the column for colocalization between first and second marker.
    if len(channels) > 2:
        summary[channels[1][0] + '-' + channels[2][0] + '-positive'] = 0
        fieldnames.append(channels[1][0] + '-' + channels[2][0] + '-positive')
    # Adds the columns for colocalization between all three markers.
    if len(channels) > 3:
        summary[channels[1][0] + '-' + channels[3][0] + '-positive'] = 0
        summary[channels[2][0] + '-' + channels[3][0] + '-positive'] = 0
        summary[channels[1][0] + '-' + channels[2][0] + '-' + channels[3][0] + '-positive'] = 0
        fieldnames.append(channels[1][0] + '-' + channels[3][0] + '-positive')
        fieldnames.append(channels[2][0] + '-' + channels[3][0] + '-positive')
        fieldnames.append(channels[1][0] + '-' + channels[2][0] + '-' + channels[3][0] + '-positive')

    # Loops through each particle and adds it to each field that it is True
    # for.
    areaCounter = 0
    for z, area in enumerate(areas):
        log.write(str(area))
        log.write("\n")
        if area > tooBigThreshold:
            summary['too-big-(>' + str(tooBigThreshold) + ')'] += 1
        elif area < tooSmallThreshold:
            summary['too-small-(<' + str(tooSmallThreshold) + ')'] += 1
        else:
            summary['#nuclei'] += 1
            areaCounter += area
            temp = 0
            for chan in channels:
                v, x = chan
                # NOTE(review): author's original comment said "theres an
                # error here im not sure why. i remember fixing it before" --
                # the per-channel fraction is compared against
                # areaFractionThreshold[0] instead of [x]; verify intent.
                if areaFractionsArray[x][z] > areaFractionThreshold[0]:
                    summary[chan[0] + '-positive'] += 1
                    if x != 0:
                        temp += 1
            if temp == 0:
                summary['all-negative'] += 1
            if len(channels) > 2:
                if areaFractionsArray[1][z] > areaFractionThreshold[1]:
                    if areaFractionsArray[2][z] > areaFractionThreshold[2]:
                        summary[channels[1][0] + '-' + channels[2][0] + '-positive'] += 1
            if len(channels) > 3:
                if areaFractionsArray[1][z] > areaFractionThreshold[1]:
                    if areaFractionsArray[3][z] > areaFractionThreshold[3]:
                        summary[channels[1][0] + '-' + channels[3][0] + '-positive'] += 1
                if areaFractionsArray[2][z] > areaFractionThreshold[2]:
                    if areaFractionsArray[3][z] > areaFractionThreshold[3]:
                        summary[channels[2][0] + '-' + channels[3][0] + '-positive'] += 1
                        if areaFractionsArray[1][z] > areaFractionThreshold[1]:
                            summary[channels[1][0] + '-' + channels[2][0] + '-' +
                                    channels[3][0] + '-positive'] += 1

    # Calculate the average of the particle sizes.
    if float(summary['#nuclei']) > 0:
        summary['size-average'] = round(areaCounter / summary['#nuclei'], 2)

    # Opens and appends one line on the final csv file for the subfolder
    # (remember that this is still inside the loop that goes through each
    # image).
    with open(outputDirectory + "/" + outputName + ".csv", 'a') as csvfile:
        writer = csv.DictWriter(csvfile, fieldnames=fieldnames,
                                extrasaction='ignore', lineterminator='\n')
        if os.path.getsize(outputDirectory + "/" + outputName + ".csv") < 1:
            writer.writeheader()
        writer.writerow(summary)
# 2. measurement options (see [http://imagej.net/developer/api/ij/measure/Measurements.html Measurements]) # 3. a ResultsTable to store the measurements # 4. The minimum size of a particle to consider for measurement # 5. The maximum size (idem) # 6. The minimum circularity of a particle # 7. The maximum circularity minSize = 30.0 maxSize = 10000.0 opts = ParticleAnalyzer.EXCLUDE_EDGE_PARTICLES | ParticleAnalyzer.SHOW_OVERLAY_OUTLINES print(opts) meas = Measurements.AREA | Measurements.MEAN | Measurements.CENTER_OF_MASS print(meas) pa = ParticleAnalyzer(opts, meas, results_table, minSize, maxSize) # pa.setHideOutputImage(False) pa.setRoiManager(roim) if pa.analyze(imp_work): imp_out = pa.getOutputImage() # imp_out.show() roim.runCommand(blobs, "Show All with labels") blobs.show() results_table.show("Results") roim.show() print "All ok" else: print "There was a problem in analyzing", blobs # The measured areas are listed in the first column of the results table, as a float array: areas = results_table.getColumn(0)
def analyze_homogeneity(image_title):
    """Measure illumination homogeneity from a grid-of-spots image.

    image_title -- window title of the (possibly 3D) raw spot image.

    Detects the spots by auto-threshold + particle analysis, measures
    integrated density / max / area per spot (optionally filling away the
    oversized alignment cross), then converts the working image into a
    pseudo-colored homogeneity map.

    Returns (hg_imp, measurements) where hg_imp is the homogeneity map and
    measurements is a dict of summary statistics.

    Relies on module-level config: HOMOGENEITY_RADIUS, HOMOGENEITY_THRESHOLD,
    REMOVE_CROSS, HOMOGENEITY_ACCEPTANCE_THRESHOLD, and helpers compute_mean,
    compute_median, compute_std_dev(values, median).
    """
    IJ.selectWindow(image_title)
    raw_imp = IJ.getImage()
    IJ.run(raw_imp, "Duplicate...", "title=Homogeneity duplicate")
    IJ.selectWindow('Homogeneity')
    hg_imp = IJ.getImage()

    # Get a 2D image
    if hg_imp.getNSlices() > 1:
        IJ.run(hg_imp, "Z Project...", "projection=[Average Intensity]")
        hg_imp.close()
        # BUG FIX: an Average-Intensity projection is titled "AVG_...", not
        # "MAX_..." -- the original selectWindow('MAX_Homogeneity') failed.
        IJ.selectWindow('AVG_Homogeneity')
        hg_imp = IJ.getImage()
        hg_imp.setTitle('Homogeneity')

    # Blur and BG correct the image
    IJ.run(hg_imp, 'Gaussian Blur...', 'sigma=' + str(HOMOGENEITY_RADIUS) + ' stack')

    # Detect the spots
    IJ.setAutoThreshold(hg_imp, HOMOGENEITY_THRESHOLD + " dark")
    rm = RoiManager(True)
    table = ResultsTable()
    # BUG FIX: the original passed ADD_TO_MANAGER and EXCLUDE_EDGE_PARTICLES
    # as two separate positional arguments, shifting every following argument
    # of the constructor; option flags must be OR'd into a single int.
    pa = ParticleAnalyzer(
        ParticleAnalyzer.ADD_TO_MANAGER | ParticleAnalyzer.EXCLUDE_EDGE_PARTICLES,
        Measurements.AREA,  # measurements
        table,              # output table
        0,                  # MinSize
        500,                # MaxSize
        0.0,                # minCirc
        1.0)                # maxCirc
    # Route detected ROIs into our hidden manager (consistent with the other
    # analyzers in this file).
    pa.setRoiManager(rm)
    pa.setHideOutputImage(True)
    pa.analyze(hg_imp)

    # Spot-size statistics, used to recognise the oversized alignment cross.
    areas = table.getColumn(table.getColumnIndex('Area'))
    median_areas = compute_median(areas)
    st_dev_areas = compute_std_dev(areas, median_areas)
    thresholds_areas = (median_areas - (2 * st_dev_areas),
                        median_areas + (2 * st_dev_areas))

    roi_measurements = {'integrated_density': [], 'max': [], 'area': []}
    IJ.setForegroundColor(0, 0, 0)
    for roi in rm.getRoisAsArray():
        hg_imp.setRoi(roi)
        # BUG FIX: ImageStatistics exposes lowercase fields (area, mean, max);
        # the original read nonexistent AREA / INTEGRATED_DENSITY / MIN_MAX.
        stats = hg_imp.getStatistics()
        if REMOVE_CROSS and stats.area > thresholds_areas[1]:
            # Oversized blob (the cross): paint it away with the (black)
            # foreground color.
            rm.runCommand('Fill')
        else:
            # Integrated density = mean * area (standard ImageJ definition).
            roi_measurements['integrated_density'].append(stats.mean * stats.area)
            roi_measurements['max'].append(stats.max)
            # BUG FIX: the original appended to a nonexistent key
            # 'integrated_densities'; the area list was intended.
            roi_measurements['area'].append(stats.area)
    # Clear the manager once all ROIs have been processed.
    rm.runCommand('Delete')

    med_id = compute_median(roi_measurements['integrated_density'])
    med_max = compute_median(roi_measurements['max'])
    med_area = compute_median(roi_measurements['area'])
    measurements = {
        'mean_integrated_density': compute_mean(roi_measurements['integrated_density']),
        'median_integrated_density': med_id,
        # BUG FIX: compute_std_dev takes (values, median) elsewhere in this
        # file; the original passed only one argument here.
        'std_dev_integrated_density': compute_std_dev(roi_measurements['integrated_density'], med_id),
        'mean_max': compute_mean(roi_measurements['max']),
        'median_max': med_max,
        'std_dev_max': compute_std_dev(roi_measurements['max'], med_max),
        # BUG FIX: the area statistics were computed from the 'max' list.
        'mean_area': compute_mean(roi_measurements['area']),
        'median_area': med_area,
        'std_dev_area': compute_std_dev(roi_measurements['area'], med_area),
    }

    # generate homogeneity image
    # calculate interpoint distance in pixels
    # BUG FIX: the original took len() of the scalar 'mean_max'; the spot
    # count is the length of the per-spot list.
    nr_point_columns = int(sqrt(len(roi_measurements['max'])))
    # TODO: This is a rough estimation that does not take into account margins
    # or rectangular FOVs
    inter_point_dist = hg_imp.getWidth() / nr_point_columns
    # BUG FIX: numeric option values must be str()-converted before string
    # concatenation (the originals raised TypeError).
    IJ.run(hg_imp, "Maximum...", "radius=" + str(inter_point_dist * 1.22))
    # Normalize to 100
    # BUG FIX: the original divided the list by 100 before max(); divide the
    # maximum instead.
    IJ.run(hg_imp, "Divide...", "value=" + str(max(roi_measurements['max']) / 100))
    IJ.run(hg_imp, "Gaussian Blur...", "sigma=" + str(inter_point_dist / 2))
    # BUG FIX: getProcessor is a method; the original was missing the call
    # parentheses, so setMinAndMax was looked up on the bound method.
    hg_imp.getProcessor().setMinAndMax(0, 255)

    # Create a LUT based on a predefined threshold
    red = zeros(256, 'b')
    green = zeros(256, 'b')
    blue = zeros(256, 'b')
    acceptance_threshold = HOMOGENEITY_ACCEPTANCE_THRESHOLD * 256 / 100
    for i in range(256):
        red[i] = (i - acceptance_threshold)
        green[i] = (i)
    homogeneity_LUT = LUT(red, green, blue)
    hg_imp.setLut(homogeneity_LUT)

    return hg_imp, measurements
def procOneImage(pathpre, wnumber, endings):
    """ Analyzes a single image set (Dapi, VSVG, PM images)
    pathpre: fullpath prefix, down till "endings".
    endings: a dictionary with signiture for three different channels.
    wnumber: a number in string, indicating the spot ID.
    Returns three results tables.
    """
    # Load the three channels of this spot.
    imp = IJ.openImage(pathpre + endings['dapi'] + '.tif')
    impVSVG = IJ.openImage(pathpre + endings['vsvg'] + '.tif')
    impPM = IJ.openImage(pathpre + endings['pm'] + '.tif')
    imp2 = imp.duplicate()
    rtallcellPM = ResultsTable()
    rtjnucVSVG = ResultsTable()
    rtallcellVSVG = ResultsTable()
    # Background levels, subtracted later inside measureROIs.
    backVSVG = backgroundSubtraction(impVSVG)
    backPM = backgroundSubtraction(impPM)
    # Segment nuclei from the Dapi channel; an all-zero result means no
    # nuclei were found, so return the (empty) tables early.
    impfilteredNuc = nucleusSegmentation(imp2)
    intmax = impfilteredNuc.getProcessor().getMax()
    if intmax == 0:
        return rtallcellPM, rtjnucVSVG, rtallcellVSVG
    # Convert the labelled nuclei into individual ROIs.
    impfilteredNuc.getProcessor().setThreshold(1, intmax, ImageProcessor.NO_LUT_UPDATE)
    nucroi = ThresholdToSelection().convert(impfilteredNuc.getProcessor())
    nucroiA = ShapeRoi(nucroi).getRois()
    #print nucroiA
    # Whole-cell ROIs (enlarged nuclei) and juxtanuclear ring ROIs.
    allcellA = [roiEnlarger(r) for r in nucroiA]
    jnucroiA = [roiRingGenerator(r) for r in nucroiA]
    #print allcellA
    print 'Detected Cells: ', len(jnucroiA)
    if len(jnucroiA) <2:
        print "measurement omitted, as there is only on nucleus detected"
        return rtallcellPM, rtjnucVSVG, rtallcellVSVG
    if (GUIMODE):
        # Show the ROIs and the segmented nuclei for visual inspection.
        rm = RoiManager()
        for r in jnucroiA:
            rm.addRoi(r)
        rm.show()
        impfilteredNuc.show()

    # Measurement flags passed to the particle-measurement helper.
    measOpt = PA.AREA + PA.MEAN + PA.CENTROID + PA.STD_DEV + PA.SHAPE_DESCRIPTORS + PA.INTEGRATED_DENSITY + PA.MIN_MAX +\
        PA.SKEWNESS + PA.KURTOSIS + PA.MEDIAN + PA.MODE
    ## All Cell Plasma Membrane intensity
    measureROIs(impPM, measOpt, rtallcellPM, allcellA, backPM, True)
    meanInt_Cell = rtallcellPM.getColumn(rtallcellPM.getColumnIndex('Mean'))
    print "Results Table rownumber:", len(meanInt_Cell)
    # JuxtaNuclear VSVG intensity
    measureROIs(impVSVG, measOpt, rtjnucVSVG, jnucroiA, backVSVG, False)
    meanInt_jnuc = rtjnucVSVG.getColumn(rtjnucVSVG.getColumnIndex('Mean'))
    # AllCell VSVG intensity
    measureROIs(impVSVG, measOpt, rtallcellVSVG, allcellA, backVSVG, True)
    meanInt_vsvgall = rtallcellVSVG.getColumn(rtallcellVSVG.getColumnIndex('Mean'))
    # Calculation of Transport Ratio: JuxtaNuclear VSVG intensity / All Cell
    # Plasma Membrane intensity; results will be appended to the PM results
    # table.
    for i in range(len(meanInt_Cell)):
        if meanInt_Cell[i] != 0.0:
            transportR = meanInt_jnuc[i] / meanInt_Cell[i]
            transportRall = meanInt_vsvgall[i] / meanInt_Cell[i]
        else:
            # Zero membrane signal: flag the ratio as infinite rather than
            # dividing by zero.
            transportR = float('inf')
            transportRall = float('inf')
        rtjnucVSVG.setValue('TransportRatio', i, transportR)
        rtallcellVSVG.setValue('TransportRatio', i, transportRall)
        # Tag every row with the spot (well) number.
        rtjnucVSVG.setValue('WellNumber', i, int(wnumber))
        rtallcellVSVG.setValue('WellNumber', i, int(wnumber))
        rtallcellPM.setValue('WellNumber', i, int(wnumber))
    return rtallcellPM, rtjnucVSVG, rtallcellVSVG