def showimage(self):
    roim = RoiManager.getInstance()
    if roim is None:
        roim = RoiManager()
    IJ.run("Close All")
    IJ.run("Clear Results")
    try:
        roim.reset()
    except AttributeError:
        roim.runCommand("reset")
    obj = self.fcsimages[self.idximg][0]
    imgName = self.fcsimages[self.idximg][1]
    img = BF.openImagePlus(imgName)[0]
    img.setZ(obj[1][2] + 1)
    img.setC(3)
    IJ.run(img, "Grays", "")
    img.setC(1)
    img.show()
    # draw ROIs
    for i in range(1, len(obj) + 1):
        PR = PointRoi(obj[i][0], obj[i][1])
        try:
            PR.setSize(3)
            PR.setPointType(0)
            roim.addRoi(PR)
        except:
            roim.addRoi(PR)
    roim.runCommand('Show All with Labels')
IC(imp).convertToRGB()
# show image
imp.show()
# define an ROI covering (nearly) the whole image
imp.setRoi(1, 1, 478, 479)
###### OPTIONAL ##############
IJ.run("Brightness/Contrast...")
IJ.run(imp, "Enhance Contrast", "saturated=.8")
# open and clear the ROI manager
rm = RoiManager.getInstance()
if not rm:
    rm = RoiManager()
rm.reset()
# If you want to choose regions manually:
# IJ.setTool("rectangle")
# waiting = WaitForUserDialog("Action required", "Draw a single ROI with all puncta of interest inside! Then hit OK")
# waiting.show()
# get the image title
Title = imp.getTitle()
# build the output path from outdir and the image title
outdir = os.path.join(outdir, Title)
# run Puncta Analyzer
IJ.run(imp, "Puncta Analyzer", "condition=1 red green subtract save rolling=50 light")
# save results
IJ.selectWindow("Results")
IJ.saveAs("Results", outdir + ".csv")
if thresholdMode:
    channel.show()
    WaitForUserDialog("Title", "Look at threshold for " + color[i]).show()
# adds count to summary
if table.getColumnIndex("Area") != -1:
    summary[color[i] + "-ROI-count"] = len(table.getColumn(table.getColumnIndex("Area")))
channel.changes = False
channel.close()
roim.reset()
roim.close()

# Writes everything to the output file
fieldnames = ["Directory", "Filename",
              "Red-intensity", "Red-threshold-used", "Red-ROI-count",
              "Green-intensity", "Green-threshold-used", "Green-ROI-count",
              "Blue-intensity", "Blue-threshold-used", "Blue-ROI-count"]
with open(output_name, 'a') as csvfile:
    writer = csv.DictWriter(csvfile, fieldnames=fieldnames, extrasaction='ignore', lineterminator='\n')
    if os.path.getsize(output_name) < 1:
        writer.writeheader()
    writer.writerow(summary)

# End of macro
def process(subFolder, outputDirectory, filename):
    imp = IJ.openImage(inputDirectory + subFolder + '/' + rreplace(filename, "_ch00.tif", ".tif"))
    IJ.run(
        imp, "Properties...",
        "channels=1 slices=1 frames=1 unit=um pixel_width=0.8777017 pixel_height=0.8777017 voxel_depth=25400.0508001"
    )
    ic = ImageConverter(imp)
    ic.convertToGray8()
    IJ.setThreshold(imp, 2, 255)
    IJ.run(imp, "Convert to Mask", "")
    IJ.run(imp, "Remove Outliers...", "radius=5 threshold=50 which=Dark")
    IJ.run(imp, "Remove Outliers...", "radius=5 threshold=50 which=Bright")

    imp.getProcessor().invert()
    rm = RoiManager(True)
    imp.getProcessor().setThreshold(0, 0, ImageProcessor.NO_LUT_UPDATE)
    boundroi = ThresholdToSelection.run(imp)
    rm.addRoi(boundroi)

    if not displayImages:
        imp.changes = False
        imp.close()

    images = [None] * 5
    intensities = [None] * 5
    blobsarea = [None] * 5
    blobsnuclei = [None] * 5
    bigAreas = [None] * 5

    for chan in channels:
        v, x = chan
        images[x] = IJ.openImage(inputDirectory + subFolder + '/' + rreplace(filename, "_ch00.tif", "_ch0" + str(x) + ".tif"))
        imp = images[x]
        for roi in rm.getRoisAsArray():
            imp.setRoi(roi)
            stats = imp.getStatistics(Measurements.MEAN | Measurements.AREA)
            intensities[x] = stats.mean
            bigAreas[x] = stats.area

    rm.close()

    # Opens the ch00 image and sets default properties
    imp = IJ.openImage(inputDirectory + subFolder + '/' + filename)
    IJ.run(
        imp, "Properties...",
        "channels=1 slices=1 frames=1 unit=um pixel_width=0.8777017 pixel_height=0.8777017 voxel_depth=25400.0508001"
    )

    # Sets the threshold and watersheds. For more details on image processing, see
    # https://imagej.nih.gov/ij/developer/api/ij/process/ImageProcessor.html
    ic = ImageConverter(imp)
    ic.convertToGray8()
    IJ.run(imp, "Remove Outliers...", "radius=2 threshold=50 which=Dark")
    IJ.run(imp, "Gaussian Blur...", "sigma=" + str(blur))
    IJ.setThreshold(imp, lowerBounds[0], 255)

    if displayImages:
        imp.show()

    IJ.run(imp, "Convert to Mask", "")
    IJ.run(imp, "Watershed", "")

    if not displayImages:
        imp.changes = False
        imp.close()

    # Counts and measures the area of particles and adds them to a table called areas.
    # Also adds them to the ROI manager.
    table = ResultsTable()
    roim = RoiManager(True)
    ParticleAnalyzer.setRoiManager(roim)
    pa = ParticleAnalyzer(ParticleAnalyzer.ADD_TO_MANAGER, Measurements.AREA, table, 15, 9999999999999999, 0.2, 1.0)
    pa.setHideOutputImage(True)
    # imp = impM
    # imp.getProcessor().invert()
    pa.analyze(imp)
    areas = table.getColumn(0)

    # This loop goes through the remaining channels for the other markers, by replacing the
    # ch00 at the end with the corresponding channel. It saves all the area fractions into
    # a 2D array called areaFractionsArray.
    areaFractionsArray = [None] * 5
    for chan in channels:
        v, x = chan
        # Opens each image and thresholds
        imp = images[x]
        IJ.run(
            imp, "Properties...",
            "channels=1 slices=1 frames=1 unit=um pixel_width=0.8777017 pixel_height=0.8777017 voxel_depth=25400.0508001"
        )
        ic = ImageConverter(imp)
        ic.convertToGray8()
        IJ.setThreshold(imp, lowerBounds[x], 255)

        if displayImages:
            imp.show()
            WaitForUserDialog("Title", "Adjust Threshold for Marker " + v).show()

        IJ.run(imp, "Convert to Mask", "")

        # Measures the area fraction of the new image for each ROI from the ROI manager.
        areaFractions = []
        for roi in roim.getRoisAsArray():
            imp.setRoi(roi)
            stats = imp.getStatistics(Measurements.AREA_FRACTION)
            areaFractions.append(stats.areaFraction)

        # Saves the results in areaFractionsArray
        areaFractionsArray[x] = areaFractions

    roim.close()

    for chan in channels:
        v, x = chan
        imp = images[x]
        imp.deleteRoi()
        roim = RoiManager(True)
        ParticleAnalyzer.setRoiManager(roim)
        pa = ParticleAnalyzer(ParticleAnalyzer.ADD_TO_MANAGER, Measurements.AREA, table, 15, 9999999999999999, 0.2, 1.0)
        pa.analyze(imp)

        blobs = []
        for roi in roim.getRoisAsArray():
            imp.setRoi(roi)
            stats = imp.getStatistics(Measurements.AREA)
            blobs.append(stats.area)

        blobsarea[x] = sum(blobs)
        blobsnuclei[x] = len(blobs)

        if not displayImages:
            imp.changes = False
            imp.close()
        roim.reset()
        roim.close()

    # Creates the summary dictionary which will correspond to a single row in the output csv,
    # with each key being a column.
    summary = {}
    summary['Image'] = filename
    summary['Directory'] = subFolder

    # Adds the usual columns
    summary['size-average'] = 0
    summary['#nuclei'] = 0
    summary['all-negative'] = 0
    summary['too-big-(>' + str(tooBigThreshold) + ')'] = 0
    summary['too-small-(<' + str(tooSmallThreshold) + ')'] = 0

    # Creates the fieldnames variable needed to create the csv file at the end.
    fieldnames = [
        'Name', 'Directory', 'Image', 'size-average',
        'too-big-(>' + str(tooBigThreshold) + ')',
        'too-small-(<' + str(tooSmallThreshold) + ')',
        '#nuclei', 'all-negative'
    ]

    # Adds the columns for each individual marker (ignoring DAPI since it was used to count nuclei)
    summary["organoid-area"] = bigAreas[x]
    fieldnames.append("organoid-area")

    for chan in channels:
        v, x = chan
        summary[v + "-positive"] = 0
        fieldnames.append(v + "-positive")
        summary[v + "-intensity"] = intensities[x]
        fieldnames.append(v + "-intensity")
        summary[v + "-blobsarea"] = blobsarea[x]
        fieldnames.append(v + "-blobsarea")
        summary[v + "-blobsnuclei"] = blobsnuclei[x]
        fieldnames.append(v + "-blobsnuclei")

    # Adds the column for colocalization between the first and second marker
    if len(channels) > 2:
        summary[channels[1][0] + '-' + channels[2][0] + '-positive'] = 0
        fieldnames.append(channels[1][0] + '-' + channels[2][0] + '-positive')

    # Adds the columns for colocalization between all three markers
    if len(channels) > 3:
        summary[channels[1][0] + '-' + channels[3][0] + '-positive'] = 0
        summary[channels[2][0] + '-' + channels[3][0] + '-positive'] = 0
        summary[channels[1][0] + '-' + channels[2][0] + '-' + channels[3][0] + '-positive'] = 0
        fieldnames.append(channels[1][0] + '-' + channels[3][0] + '-positive')
        fieldnames.append(channels[2][0] + '-' + channels[3][0] + '-positive')
        fieldnames.append(channels[1][0] + '-' + channels[2][0] + '-' + channels[3][0] + '-positive')

    # Loops through each particle and adds it to each field that it is True for.
    areaCounter = 0
    for z, area in enumerate(areas):
        log.write(str(area))
        log.write("\n")

        if area > tooBigThreshold:
            summary['too-big-(>' + str(tooBigThreshold) + ')'] += 1
        elif area < tooSmallThreshold:
            summary['too-small-(<' + str(tooSmallThreshold) + ')'] += 1
        else:
            summary['#nuclei'] += 1
            areaCounter += area

            temp = 0
            for chan in channels:
                v, x = chan
                # NOTE: a previously observed, unexplained error was traced to this comparison;
                # indexing areaFractionThreshold with [0] rather than [x] is a likely cause.
                if areaFractionsArray[x][z] > areaFractionThreshold[0]:
                    summary[chan[0] + '-positive'] += 1
                    if x != 0:
                        temp += 1

            if temp == 0:
                summary['all-negative'] += 1

            if len(channels) > 2:
                if areaFractionsArray[1][z] > areaFractionThreshold[1]:
                    if areaFractionsArray[2][z] > areaFractionThreshold[2]:
                        summary[channels[1][0] + '-' + channels[2][0] + '-positive'] += 1

            if len(channels) > 3:
                if areaFractionsArray[1][z] > areaFractionThreshold[1]:
                    if areaFractionsArray[3][z] > areaFractionThreshold[3]:
                        summary[channels[1][0] + '-' + channels[3][0] + '-positive'] += 1
                if areaFractionsArray[2][z] > areaFractionThreshold[2]:
                    if areaFractionsArray[3][z] > areaFractionThreshold[3]:
                        summary[channels[2][0] + '-' + channels[3][0] + '-positive'] += 1
                        if areaFractionsArray[1][z] > areaFractionThreshold[1]:
                            summary[channels[1][0] + '-' + channels[2][0] + '-' + channels[3][0] + '-positive'] += 1

    # Calculate the average of the particle sizes
    if float(summary['#nuclei']) > 0:
        summary['size-average'] = round(areaCounter / summary['#nuclei'], 2)

    # Opens and appends one line to the final csv file for the subfolder
    # (remember that this is still inside the loop that goes through each image).
    with open(outputDirectory + "/" + outputName + ".csv", 'a') as csvfile:
        writer = csv.DictWriter(csvfile, fieldnames=fieldnames, extrasaction='ignore', lineterminator='\n')
        if os.path.getsize(outputDirectory + "/" + outputName + ".csv") < 1:
            writer.writeheader()
        writer.writerow(summary)
fs = os.listdir(dir.getDirectory())
csv_path = os.path.join(dir.getDirectory(), 'cell_coordinates.csv')
# img_path = os.path.join(dir.getDirectory(), 'input', 'dapi_max-z.png')
print(csv_path)
# print(img_path)
# imp = IJ.openImage(img_path)
# imp.show()
roi_manager = RoiManager()
for gene in gene_list:
    roi_manager.reset()
    with open(csv_path) as csvfile:
        reader = csv.DictReader(csvfile)
        for n, row in enumerate(reader):
            # print(row['cell_n'])
            poly_name = row['gene_name']
            # poly_name = ast.literal_eval(poly_name)
            if gene == poly_name:
                print(gene, poly_name)
                rr = row['row_pixels']
                cc = row['col_pixels']
                rs = ast.literal_eval(rr)
                cs = ast.literal_eval(cc)
                proi = PolygonRoi(cs, rs, len(rs), Roi.POLYGON)
                roi_manager.addRoi(proi)
    roi_manager.runCommand("Deselect")
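    # Hypothetical addition (not in the original script): persist this gene's ROIs before the
    # next iteration resets the manager. "Save" is the standard RoiManager command; the
    # 'rois' output folder and the file naming are assumptions.
    roi_dir = os.path.join(dir.getDirectory(), 'rois')  # assumed output location
    if not os.path.isdir(roi_dir):
        os.makedirs(roi_dir)
    if roi_manager.getCount() > 0:
        roi_manager.runCommand("Save", os.path.join(roi_dir, gene + '_RoiSet.zip'))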
def load_rois(roifile):
    rm = RoiManager(False)
    rm.reset()
    rm.runCommand("Open", roifile)
    rois = rm.getRoisAsArray()
    return rois
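# Illustrative usage sketch (not part of the original function): load a saved ROI set and
# display it as an overlay on the active image. The path is a placeholder.
from ij import IJ
from ij.gui import Overlay

rois = load_rois("/path/to/RoiSet.zip")  # placeholder path
imp = IJ.getImage()
overlay = Overlay()
for roi in rois:
    overlay.add(roi)
imp.setOverlay(overlay)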
def merge_incorrect_splits_and_get_centroids(imp, centroid_distance_limit=100, size_limit=100):
    """if particles are found with centroids closer than centroid_distance_limit and both have
    size<size_limit, get average centroid"""
    imp.killRoi()
    rt = ResultsTable()
    out_imp = IJ.createImage("Nuclei centroids from {}".format(imp.getTitle()), imp.getWidth(), imp.getHeight(), 1, 8)
    out_imp.show()
    IJ.run(out_imp, "Select All", "")
    IJ.run(out_imp, "Set...", "value=0 slice")
    out_imp.show()
    cal = imp.getCalibration()
    mxsz = imp.width * cal.pixelWidth * imp.height * cal.pixelHeight
    print("mxsz = {}".format(mxsz))
    roim = RoiManager()
    imp.show()
    pa = ParticleAnalyzer(
        ParticleAnalyzer.ADD_TO_MANAGER,
        ParticleAnalyzer.AREA | ParticleAnalyzer.SLICE | ParticleAnalyzer.CENTROID,
        rt, 0, size_limit)
    pa.setRoiManager(roim)
    roim.reset()
    rt.reset()
    pa.analyze(imp)
    MyWaitForUser("pause", "pause post-merge incorrect splits particle analysis")
    rt_xs = rt.getColumn(rt.getColumnIndex("X")).tolist()
    rt_ys = rt.getColumn(rt.getColumnIndex("Y")).tolist()
    centroids = [(x, y) for x, y in zip(rt_xs, rt_ys)]
    print("centroids = {}".format(centroids))
    centroids_set = set()
    for c in centroids:
        ds = [math.sqrt((c[0] - cx)**2 + (c[1] - cy)**2) for (cx, cy) in centroids]
        close_mask = [d < centroid_distance_limit for d in ds]
        # If no other centroids are within centroid_distance_limit, this adds the centroid itself;
        # otherwise it adds the average position of this centroid and those within the limit.
        centroids_set.add(
            (sum([msk * b[0] for msk, b in zip(close_mask, centroids)]) / sum(close_mask),
             sum([msk * b[1] for msk, b in zip(close_mask, centroids)]) / sum(close_mask)))
    roim.reset()
    rt.reset()
    pa = ParticleAnalyzer(
        ParticleAnalyzer.ADD_TO_MANAGER,
        ParticleAnalyzer.AREA | ParticleAnalyzer.SLICE | ParticleAnalyzer.CENTROID,
        rt, size_limit, mxsz)
    pa.setRoiManager(roim)
    pa.analyze(imp)
    MyWaitForUser("pause", "pause post-merge incorrect splits particle analysis 2")
    if rt.columnExists("X"):
        rt_xs = rt.getColumn(rt.getColumnIndex("X")).tolist()
        rt_ys = rt.getColumn(rt.getColumnIndex("Y")).tolist()
        centroids = [(x, y) for x, y in zip(rt_xs, rt_ys)]
        for c in centroids:
            centroids_set.add(c)
    centroids = list(centroids_set)
    cal = imp.getCalibration()
    centroids = [(c[0] / cal.pixelWidth, c[1] / cal.pixelHeight) for c in centroids]
    print("new number of nuclei identified = {}".format(len(centroids)))
    roim.reset()
    roim.close()
    for idx, c in enumerate(centroids):
        roi = OvalRoi(c[0], c[1], 10, 10)
        out_imp.setRoi(roi)
        IJ.run(out_imp, "Set...", "value={} slice".format(idx + 1))
    imp.changes = False
    # imp.close()
    return out_imp
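# Illustrative call (not in the original): `mask_imp` is assumed to be an open binary nuclei
# mask; the distance/size limits are example values only.
mask_imp = IJ.getImage()
centroid_imp = merge_incorrect_splits_and_get_centroids(mask_imp, centroid_distance_limit=50, size_limit=200)
IJ.run(centroid_imp, "glasbey", "")  # optional: apply a label LUT if Glasbey is installed (Fiji)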
def process(subFolder, outputDirectory, filename):
    imp = IJ.openImage(inputDirectory + subFolder + '/' + filename)
    imp.show()
    IJ.run(
        imp, "Properties...",
        "channels=1 slices=1 frames=1 unit=um pixel_width=0.8777017 pixel_height=0.8777017 voxel_depth=25400.0508001"
    )
    ic = ImageConverter(imp)
    dup = imp.duplicate()
    dup_title = dup.getTitle()
    ic.convertToGray8()
    imp.updateAndDraw()
    IJ.run("Threshold...")
    IJ.setThreshold(218, 245)
    IJ.run(imp, "Convert to Mask", "")

    rm = RoiManager()
    imp.getProcessor().setThreshold(0, 0, ImageProcessor.NO_LUT_UPDATE)
    boundroi = ThresholdToSelection.run(imp)
    rm.addRoi(boundroi)

    imp.changes = False
    imp.close()

    images = [None] * 5
    intensities = [None] * 5
    blobsarea = [None] * 5
    blobsnuclei = [None] * 5
    cells = [None] * 5
    bigareas = [None] * 5

    IJ.run(dup, "Colour Deconvolution", "vectors=[H DAB]")

    images[0] = getImage(dup_title + "-(Colour_1)")
    images[1] = getImage(dup_title + "-(Colour_2)")
    images[2] = getImage(dup_title + "-(Colour_3)")
    images[2].close()

    for chan in channels:
        v, x = chan
        imp = images[x]
        imp.show()
        for roi in rm.getRoiManager().getRoisAsArray():
            imp.setRoi(roi)
            stats = imp.getStatistics(Measurements.MEAN | Measurements.AREA)
            intensities[x] = stats.mean
            bigareas[x] = stats.area
        rm.runCommand(imp, "Show None")

    rm.close()

    # Opens the ch00 image and sets default properties
    imp = images[0].duplicate()
    IJ.run(
        imp, "Properties...",
        "channels=1 slices=1 frames=1 unit=um pixel_width=0.8777017 pixel_height=0.8777017 voxel_depth=25400.0508001"
    )

    # Sets the threshold and watersheds. For more details on image processing, see
    # https://imagej.nih.gov/ij/developer/api/ij/process/ImageProcessor.html
    imp.show()
    setTempCurrentImage(imp)
    ic = ImageConverter(imp)
    imp.updateAndDraw()
    IJ.run(imp, "Gaussian Blur...", "sigma=" + str(blur))
    imp.updateAndDraw()
    imp.show()
    IJ.run("Threshold...")
    IJ.setThreshold(30, lowerBounds[0])

    if displayImages:
        imp.show()
        WaitForUserDialog("Title", "Adjust threshold for nuclei. Current region is: " + region).show()

    IJ.run(imp, "Convert to Mask", "")

    # Counts and measures the area of particles and adds them to a table called areas.
    # Also adds them to the ROI manager.
    table = ResultsTable()
    roim = RoiManager()
    pa = ParticleAnalyzer(ParticleAnalyzer.ADD_TO_MANAGER, Measurements.AREA, table, 5, 9999999999999999, 0.05, 1.0)
    pa.setHideOutputImage(True)
    imp = IJ.getImage()
    # imp.getProcessor().invert()
    pa.analyze(imp)

    imp.changes = False
    imp.close()

    areas = table.getColumn(0)

    # This loop goes through the remaining channels for the other markers, by replacing the
    # ch00 at the end with the corresponding channel. It saves all the area fractions into
    # a 2D array called areaFractionsArray.
    areaFractionsArray = [None] * 5
    maxThresholds = []
    for chan in channels:
        v, x = chan

        # Opens each image and thresholds
        imp = images[x]
        IJ.run(
            imp, "Properties...",
            "channels=1 slices=1 frames=1 unit=um pixel_width=0.8777017 pixel_height=0.8777017 voxel_depth=25400.0508001"
        )
        imp.show()
        setTempCurrentImage(imp)
        ic = ImageConverter(imp)
        ic.convertToGray8()
        imp.updateAndDraw()
        rm.runCommand(imp, "Show None")
        rm.runCommand(imp, "Show All")
        rm.runCommand(imp, "Show None")
        imp.show()
        IJ.selectWindow(imp.getTitle())
        IJ.run("Threshold...")
        IJ.setThreshold(20, lowerBounds[x])

        if displayImages:
            WaitForUserDialog("Title", "Adjust threshold for " + v + ". Current region is: " + region).show()

        ip = imp.getProcessor()
        maxThresholds.append(ip.getMaxThreshold())

        IJ.run(imp, "Convert to Mask", "")

        # Measures the area fraction of the new image for each ROI from the ROI manager.
        areaFractions = []
        for roi in roim.getRoiManager().getRoisAsArray():
            imp.setRoi(roi)
            stats = imp.getStatistics(Measurements.AREA_FRACTION)
            areaFractions.append(stats.areaFraction)

        # Saves the results in areaFractionsArray
        areaFractionsArray[x] = areaFractions

    roim.close()

    for chan in channels:
        v, x = chan
        imp = images[x]
        imp.deleteRoi()
        imp.updateAndDraw()
        setTempCurrentImage(imp)
        roim = RoiManager()
        pa = ParticleAnalyzer(ParticleAnalyzer.ADD_TO_MANAGER, Measurements.AREA, table, 15, 9999999999999999, 0.2, 1.0)
        pa.analyze(imp)

        blobs = []
        cell = []
        for roi in roim.getRoiManager().getRoisAsArray():
            imp.setRoi(roi)
            stats = imp.getStatistics(Measurements.AREA)
            blobs.append(stats.area)
            if stats.area > tooSmallThresholdDAB and stats.area < tooBigThresholdDAB:
                cell.append(stats.area)

        blobsarea[x] = sum(blobs)
        blobsnuclei[x] = len(blobs)
        cells[x] = len(cell)

        imp.changes = False
        imp.close()
        roim.reset()
        roim.close()

    # Creates the summary dictionary which will correspond to a single row in the output csv,
    # with each key being a column.
    summary = {}
    summary['Image'] = filename
    summary['Directory'] = subFolder

    # Adds the usual columns
    summary['size-average'] = 0
    summary['#nuclei'] = 0
    summary['all-negative'] = 0
    summary['too-big-(>' + str(tooBigThreshold) + ')'] = 0
    summary['too-small-(<' + str(tooSmallThreshold) + ')'] = 0

    # Creates the fieldnames variable needed to create the csv file at the end.
    fieldnames = [
        'Directory', 'Image', 'size-average',
        'too-big-(>' + str(tooBigThreshold) + ')',
        'too-small-(<' + str(tooSmallThreshold) + ')',
        '#nuclei', 'all-negative'
    ]

    for row in info:
        if row['Animal ID'] == filename.replace('s', '-').replace('p', '-').split('-')[0]:
            for key, value in row.items():
                fieldnames.insert(0, key)
                summary[key] = value

    # Adds the columns for each individual marker (ignoring DAPI since it was used to count nuclei)
    summary["tissue-area"] = bigareas[0]
    fieldnames.append("tissue-area")

    for chan in channels:
        v, x = chan
        summary[v + "-HEMO-cells"] = 0
        fieldnames.append(v + "-HEMO-cells")
        summary[v + "-intensity"] = intensities[x]
        fieldnames.append(v + "-intensity")
        summary[v + "-area"] = blobsarea[x]
        fieldnames.append(v + "-area")
        summary[v + "-area/tissue-area"] = blobsarea[x] / bigareas[0]
        fieldnames.append(v + "-area/tissue-area")
        summary[v + "-particles"] = blobsnuclei[x]
        fieldnames.append(v + "-particles")
        summary[v + "-cells"] = cells[x]
        fieldnames.append(v + "-cells")
        summary[v + "-particles/tissue-area"] = blobsnuclei[x] / bigareas[0]
        fieldnames.append(v + "-particles/tissue-area")
        fieldnames.append(v + "-HEMO-Cells/tissue-area")

    # Adds the column for colocalization between the first and second marker
    if len(channels) > 2:
        summary[channels[1][0] + '-' + channels[2][0] + '-positive'] = 0
        fieldnames.append(channels[1][0] + '-' + channels[2][0] + '-positive')

    # Adds the columns for colocalization between all three markers
    if len(channels) > 3:
        summary[channels[1][0] + '-' + channels[3][0] + '-positive'] = 0
        summary[channels[2][0] + '-' + channels[3][0] + '-positive'] = 0
        summary[channels[1][0] + '-' + channels[2][0] + '-' + channels[3][0] + '-positive'] = 0
        fieldnames.append(channels[1][0] + '-' + channels[3][0] + '-positive')
        fieldnames.append(channels[2][0] + '-' + channels[3][0] + '-positive')
        fieldnames.append(channels[1][0] + '-' + channels[2][0] + '-' + channels[3][0] + '-positive')

    # Loops through each particle and adds it to each field that it is True for.
    areaCounter = 0
    for z, area in enumerate(areas):
        if area > tooBigThreshold:
            summary['too-big-(>' + str(tooBigThreshold) + ')'] += 1
        elif area < tooSmallThreshold:
            summary['too-small-(<' + str(tooSmallThreshold) + ')'] += 1
        else:
            summary['#nuclei'] += 1
            areaCounter += area

            temp = 0
            for chan in channels:
                v, x = chan
                if areaFractionsArray[x][z] > areaFractionThreshold[0]:
                    summary[chan[0] + '-HEMO-cells'] += 1
                    if x != 0:
                        temp += 1

            if temp == 0:
                summary['all-negative'] += 1

            if len(channels) > 2:
                if areaFractionsArray[1][z] > areaFractionThreshold[1]:
                    if areaFractionsArray[2][z] > areaFractionThreshold[2]:
                        summary[channels[1][0] + '-' + channels[2][0] + '-positive'] += 1

            if len(channels) > 3:
                if areaFractionsArray[1][z] > areaFractionThreshold[1]:
                    if areaFractionsArray[3][z] > areaFractionThreshold[3]:
                        summary[channels[1][0] + '-' + channels[3][0] + '-positive'] += 1
                if areaFractionsArray[2][z] > areaFractionThreshold[2]:
                    if areaFractionsArray[3][z] > areaFractionThreshold[3]:
                        summary[channels[2][0] + '-' + channels[3][0] + '-positive'] += 1
                        if areaFractionsArray[1][z] > areaFractionThreshold[1]:
                            summary[channels[1][0] + '-' + channels[2][0] + '-' + channels[3][0] + '-positive'] += 1

    # Calculate the average of the particle sizes
    for chan in channels:
        v, x = chan
        summary[v + "-cells/tissue-area"] = summary[v + "-cells"] / bigareas[0]

    if float(summary['#nuclei']) > 0:
        summary['size-average'] = round(areaCounter / summary['#nuclei'], 2)

    if displayImages:
        fieldnames = ["Directory", "Image"]
        for chan in channels:
            v, x = chan
            summary[v + "-threshold"] = maxThresholds[x]
            fieldnames.append(v + "-threshold")
            allMaxThresholds[v + "-" + region].append(maxThresholds[x])

    # Opens and appends one line to the final csv file for the subfolder
    # (remember that this is still inside the loop that goes through each image).
    with open(outputName, 'a') as csvfile:
        writer = csv.DictWriter(csvfile, fieldnames=fieldnames, extrasaction='ignore', lineterminator='\n')
        if os.path.getsize(outputName) < 1:
            writer.writeheader()
        writer.writerow(summary)
def generate_background_rois(input_mask_imp, params, membrane_edges, dilations=5, threshold_method=None, membrane_imp=None):
    """automatically identify background region based on auto-thresholded image, existing membrane edges and position of midpoint anchor"""
    if input_mask_imp is None and membrane_imp is not None:
        segmentation_imp = Duplicator().run(membrane_imp)
        # Do thresholding using either the previous method (if threshold_method is None) or the
        # supplied, (less conservative?) threshold method.
        if (threshold_method is None or not (threshold_method in params.listThresholdMethods())):
            mask_imp = make_and_clean_binary(segmentation_imp, params.threshold_method)
        else:
            mask_imp = make_and_clean_binary(segmentation_imp, threshold_method)
        segmentation_imp.close()
    else:
        input_mask_imp.killRoi()
        mask_imp = Duplicator().run(input_mask_imp)

    rois = []
    IJ.setForegroundColor(0, 0, 0)
    roim = RoiManager(True)
    rt = ResultsTable()

    for fridx in range(mask_imp.getNFrames()):
        mask_imp.setT(fridx + 1)
        # Add an extra bit to the binary mask from the loaded membrane in case the user refined the edges:
        # flip the midpoint anchor across the line joining the two extremes of the membrane,
        # and fill in the triangle made by this new point and those extremes.
        poly = membrane_edges[fridx].getPolygon()
        l1 = (poly.xpoints[0], poly.ypoints[0])
        l2 = (poly.xpoints[-1], poly.ypoints[-1])
        M = (0.5 * (l1[0] + l2[0]), 0.5 * (l1[1] + l2[1]))
        Mp1 = (params.manual_anchor_midpoint[0][0] - M[0], params.manual_anchor_midpoint[0][1] - M[1])
        p2 = (M[0] - Mp1[0], M[1] - Mp1[1])
        new_poly_x = list(poly.xpoints)
        new_poly_x.append(p2[0])
        new_poly_y = list(poly.ypoints)
        new_poly_y.append(p2[1])
        mask_imp.setRoi(PolygonRoi(new_poly_x, new_poly_y, PolygonRoi.POLYGON))
        IJ.run(mask_imp, "Fill", "slice")
        mask_imp.killRoi()

        # Now dilate the masked image and identify the unmasked region closest to the midpoint anchor
        ip = mask_imp.getProcessor()
        dilations = 5  # note: this hard-coded value overrides the dilations parameter
        for d in range(dilations):
            ip.dilate()
        ip.invert()
        mask_imp.setProcessor(ip)
        mxsz = mask_imp.getWidth() * mask_imp.getHeight()
        pa = ParticleAnalyzer(
            ParticleAnalyzer.ADD_TO_MANAGER | ParticleAnalyzer.SHOW_PROGRESS,
            ParticleAnalyzer.CENTROID, rt, 0, mxsz)
        pa.setRoiManager(roim)
        pa.analyze(mask_imp)
        ds_to_anchor = [
            math.sqrt((x - params.manual_anchor_midpoint[0][0])**2 + (y - params.manual_anchor_midpoint[0][1])**2)
            for x, y in zip(
                rt.getColumn(rt.getColumnIndex("X")).tolist(),
                rt.getColumn(rt.getColumnIndex("Y")).tolist())
        ]
        if len(ds_to_anchor) > 0:
            roi = roim.getRoi(ds_to_anchor.index(min(ds_to_anchor)))
            rois.append(roi)
        else:
            rois.append(None)
        roim.reset()
        rt.reset()

    roim.close()
    mask_imp.close()
    return rois
ok = trackmate.process()
if not ok:
    sys.exit(str(trackmate.getErrorMessage()))

#----------------
# Display results
#----------------

# The feature model, which stores edge and track features.
fm = model.getFeatureModel()

rm = RoiManager.getInstance()
if not rm:
    rm = RoiManager()
rm.reset()

nextRoi = 0

for id in model.getTrackModel().trackIDs(True):
    # Fetch the track feature from the feature model.
    v = fm.getTrackFeature(id, 'TRACK_MEAN_SPEED')
    v1 = fm.getTrackFeature(id, TrackBranchingAnalyzer.NUMBER_SPLITS)
    if (v1 > 0):
        model.getLogger().log('')
        model.getLogger().log('Track ' + str(id) + ': branching = ' + str(v1))
        track = model.getTrackModel().trackSpots(id)
        sortedTrack = list(track)
        Collections.sort(sortedTrack, Spot.frameComparator)
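        # Hypothetical continuation (not shown in the original excerpt): one plausible use of the
        # cleared ROI Manager and the nextRoi counter is to add a point ROI per spot of each
        # branching track. POSITION_X/POSITION_Y/FRAME are standard TrackMate spot features;
        # PointRoi would need to be imported from ij.gui, and positions are in calibrated units.
        for spot in sortedTrack:
            sx = spot.getFeature('POSITION_X')
            sy = spot.getFeature('POSITION_Y')
            frame = int(spot.getFeature('FRAME'))
            roi = PointRoi(sx, sy)  # convert to pixel coordinates first if the image is calibrated
            roi.setPosition(frame + 1)
            rm.addRoi(roi)
            nextRoi = nextRoi + 1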
                pos[0] = int(nef.text)
            if nef.tag == 'y':
                pos[1] = int(nef.text)
            if nef.tag == 'z':
                pos[2] = int(nef.text)
        obj[int(child.attrib['ID'])] = pos
    return obj, imgName

# close all open files and clean up the ROI manager
roim = RoiManager.getInstance()
if roim is None:
    roim = RoiManager()
IJ.run("Close All")
IJ.run("Clear Results")
try:
    roim.reset()
except AttributeError:
    roim.runCommand("reset")

# read argument when called from the command line
try:
    arg = getArgument()
except:
    IJ.log(" ")
    IJ.log("Error in loading the file! Using default file!")
    IJ.log("Run macroscript: ./ImageJ-win64.exe -macro fcsxmlparser 'xmlfilename'")
    IJ.log("or ./ImageJ-win64.exe -macro fcsxmlparser 'xmlfilename -cchannelNr'")
    arg = 'X:\\AntonioP_t2\\RLadurner_JMPeters\\DoubleArrest\\150212_STAG2\\Mitosys2\\LSM\\DE_W0001_P0001\\DE_2_W0001_P0001_T0001\\TR1_W0001_P0001\\TR1_2_W0001_P0001_T0001.xml -c2'

# split off the channel argument
arg = re.split('\s+-c', arg)
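# Hypothetical follow-up (not in the original excerpt): after the split, arg[0] is the xml
# path and, if the '-c' option was given, arg[1] is the channel number. Variable names and
# the default channel are illustrative assumptions.
xmlfilename = arg[0]
channelNr = int(arg[1]) if len(arg) > 1 else 1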
def process(subFolder, outputDirectory, filename):
    # IJ.close()
    imp = IJ.openImage(inputDirectory + subFolder + '/' + rreplace(filename, "_ch00.tif", ".tif"))
    imp.show()

    # Get the pixel values from the xml file
    for file in os.listdir(inputDirectory + subFolder):
        if file.endswith('.xml'):
            xml = os.path.join(inputDirectory + subFolder, file)
            xml = "C:/Users/Harris/Desktop/test_xml_for_parsing_pixel.xml"
            element_tree = ET.parse(xml)
            root = element_tree.getroot()
            for dimensions in root.iter('DimensionDescription'):
                num_pixels = int(dimensions.attrib['NumberOfElements'])
                if dimensions.attrib['Unit'] == "m":
                    length = float(dimensions.attrib['Length']) * 1000000
                else:
                    length = float(dimensions.attrib['Length'])
                pixel_length = length / num_pixels
        else:
            pixel_length = 0.8777017

    IJ.run(
        imp, "Properties...",
        "channels=1 slices=1 frames=1 unit=um pixel_width=" + str(pixel_length) +
        " pixel_height=" + str(pixel_length) + " voxel_depth=25400.0508001")
    ic = ImageConverter(imp)
    ic.convertToGray8()
    # IJ.setThreshold(imp, 2, 255)

    # Automatically selects the area of the organoid based on automated thresholding and creates
    # a mask to be applied on all other images.
    IJ.setAutoThreshold(imp, "Mean dark no-reset")
    IJ.run(imp, "Convert to Mask", "")
    IJ.run(imp, "Analyze Particles...", "size=100000-Infinity add select")
    rm = RoiManager.getInstance()
    num_roi = rm.getCount()

    for i in range(num_roi):
        imp = getCurrentImage()
        rm.select(imp, i)
        IJ.setBackgroundColor(0, 0, 0)
        IJ.run(imp, "Clear Outside", "")

        IJ.run(imp, "Convert to Mask", "")
        IJ.run(imp, "Remove Outliers...", "radius=5 threshold=50 which=Dark")
        IJ.run(imp, "Remove Outliers...", "radius=5 threshold=50 which=Bright")

        # Save the mask and open it
        IJ.saveAs("tiff", inputDirectory + '/mask' + str(i))
        mask = IJ.openImage(inputDirectory + '/mask' + str(i) + '.tif')

    if not displayImages:
        imp.changes = False
        imp.close()

    images = [None] * 5
    intensities = [None] * 5
    blobsarea = [None] * 5
    blobsnuclei = [None] * 5
    bigAreas = [None] * 5

    imp.close()

    # Loop to open all the channel images
    for chan in channels:
        v, x = chan
        images[x] = IJ.openImage(inputDirectory + subFolder + '/' + rreplace(filename, "_ch00.tif", "_ch0" + str(x) + ".tif"))

        # Apply the mask to each image and save the result into the array
        apply_mask = ImageCalculator()
        images[x] = apply_mask.run("Multiply create 32 bit", mask, images[x])
        ic = ImageConverter(images[x])
        ic.convertToGray8()
        imp = images[x]

        # Calculate the intensities for each channel as well as the organoid area
        for roi in rm.getRoisAsArray():
            imp.setRoi(roi)
            stats_i = imp.getStatistics(Measurements.MEAN | Measurements.AREA)
            intensities[x] = stats_i.mean
            bigAreas[x] = stats_i.area

    rm.close()

    # Opens the ch00 image and sets default properties.
    # Get the pixel values from the xml file.
    for file in os.listdir(subFolder):
        if file.endswith('.xml'):
            xml = os.path.join(inputDirectory + subFolder, file)
            xml = "C:/Users/Harris/Desktop/test_xml_for_parsing_pixel.xml"
            element_tree = ET.parse(xml)
            root = element_tree.getroot()
            for dimensions in root.iter('DimensionDescription'):
                num_pixels = int(dimensions.attrib['NumberOfElements'])
                if dimensions.attrib['Unit'] == "m":
                    length = float(dimensions.attrib['Length']) * 1000000
                else:
                    length = float(dimensions.attrib['Length'])
                pixel_length = length / num_pixels
        else:
            pixel_length = 0.8777017

    imp = IJ.openImage(inputDirectory + subFolder + '/' + filename)
    imp = apply_mask.run("Multiply create 32 bit", mask, imp)
    IJ.run(
        imp, "Properties...",
        "channels=1 slices=1 frames=1 unit=um pixel_width=" + str(pixel_length) +
"pixel_height=" + str(pixel_length) + "voxel_depth=25400.0508001") # Sets the threshold and watersheds. for more details on image processing, see https://imagej.nih.gov/ij/developer/api/ij/process/ImageProcessor.html ic = ImageConverter(imp) ic.convertToGray8() IJ.run(imp, "Remove Outliers...", "radius=2" + " threshold=50" + " which=Dark") IJ.run(imp, "Gaussian Blur...", "sigma=" + str(blur)) IJ.setThreshold(imp, lowerBounds[0], 255) if displayImages: imp.show() IJ.run(imp, "Convert to Mask", "") IJ.run(imp, "Watershed", "") if not displayImages: imp.changes = False imp.close() # Counts and measures the area of particles and adds them to a table called areas. Also adds them to the ROI manager table = ResultsTable() roim = RoiManager(True) ParticleAnalyzer.setRoiManager(roim) pa = ParticleAnalyzer(ParticleAnalyzer.ADD_TO_MANAGER, Measurements.AREA, table, 15, 9999999999999999, 0.2, 1.0) pa.setHideOutputImage(True) # imp = impM # imp.getProcessor().invert() pa.analyze(imp) areas = table.getColumn(0) # This loop goes through the remaining channels for the other markers, by replacing the ch00 at the end with its corresponding channel # It will save all the area fractions into a 2d array called areaFractionsArray areaFractionsArray = [None] * 5 for chan in channels: v, x = chan # Opens each image and thresholds imp = images[x] IJ.run( imp, "Properties...", "channels=1 slices=1 frames=1 unit=um pixel_width=0.8777017 pixel_height=0.8777017 voxel_depth=25400.0508001" ) ic = ImageConverter(imp) ic.convertToGray8() IJ.setThreshold(imp, lowerBounds[x], 255) if displayImages: imp.show() WaitForUserDialog("Title", "Adjust Threshold for Marker " + v).show() IJ.run(imp, "Convert to Mask", "") # Measures the area fraction of the new image for each ROI from the ROI manager. areaFractions = [] for roi in roim.getRoisAsArray(): imp.setRoi(roi) stats = imp.getStatistics(Measurements.AREA_FRACTION) areaFractions.append(stats.areaFraction) # Saves the results in areaFractionArray areaFractionsArray[x] = areaFractions roim.close() for chan in channels: v, x = chan imp = images[x] imp.deleteRoi() roim = RoiManager(True) ParticleAnalyzer.setRoiManager(roim) pa = ParticleAnalyzer(ParticleAnalyzer.ADD_TO_MANAGER, Measurements.AREA, table, 15, 9999999999999999, 0.2, 1.0) pa.analyze(imp) blobs = [] for roi in roim.getRoisAsArray(): imp.setRoi(roi) stats = imp.getStatistics(Measurements.AREA) blobs.append(stats.area) blobsarea[x] = sum( blobs ) #take this out and use intial mask tissue area from the beginning blobsnuclei[x] = len(blobs) if not displayImages: imp.changes = False imp.close() roim.reset() roim.close() imp.close() # Creates the summary dictionary which will correspond to a single row in the output csv, with each key being a column summary = {} summary['Image'] = filename summary['Directory'] = subFolder # Adds usual columns summary['size-average'] = 0 summary['#nuclei'] = 0 summary['all-negative'] = 0 summary['too-big-(>' + str(tooBigThreshold) + ')'] = 0 summary['too-small-(<' + str(tooSmallThreshold) + ')'] = 0 # Creates the fieldnames variable needed to create the csv file at the end. 
    fieldnames = [
        'Name', 'Directory', 'Image', 'size-average',
        'too-big-(>' + str(tooBigThreshold) + ')',
        'too-small-(<' + str(tooSmallThreshold) + ')',
        '#nuclei', 'all-negative'
    ]

    # Adds the columns for each individual marker (ignoring DAPI since it was used to count nuclei)
    summary["organoid-area"] = bigAreas[x]
    fieldnames.append("organoid-area")

    for chan in channels:
        v, x = chan
        summary[v + "-positive"] = 0
        fieldnames.append(v + "-positive")
        summary[v + "-intensity"] = intensities[x]
        fieldnames.append(v + "-intensity")
        summary[v + "-blobsarea"] = blobsarea[x]
        fieldnames.append(v + "-blobsarea")
        summary[v + "-blobsnuclei"] = blobsnuclei[x]
        fieldnames.append(v + "-blobsnuclei")

    # Adds the column for colocalization between the first and second marker
    if len(channels) > 2:
        summary[channels[1][0] + '-' + channels[2][0] + '-positive'] = 0
        fieldnames.append(channels[1][0] + '-' + channels[2][0] + '-positive')

    # Adds the columns for colocalization between all three markers
    if len(channels) > 3:
        summary[channels[1][0] + '-' + channels[3][0] + '-positive'] = 0
        summary[channels[2][0] + '-' + channels[3][0] + '-positive'] = 0
        summary[channels[1][0] + '-' + channels[2][0] + '-' + channels[3][0] + '-positive'] = 0
        fieldnames.append(channels[1][0] + '-' + channels[3][0] + '-positive')
        fieldnames.append(channels[2][0] + '-' + channels[3][0] + '-positive')
        fieldnames.append(channels[1][0] + '-' + channels[2][0] + '-' + channels[3][0] + '-positive')

    # Loops through each particle and adds it to each field that it is True for.
    areaCounter = 0
    for z, area in enumerate(areas):
        log.write(str(area))
        log.write("\n")

        if area > tooBigThreshold:
            summary['too-big-(>' + str(tooBigThreshold) + ')'] += 1
        elif area < tooSmallThreshold:
            summary['too-small-(<' + str(tooSmallThreshold) + ')'] += 1
        else:
            summary['#nuclei'] += 1
            areaCounter += area

            temp = 0
            for chan in channels:
                v, x = chan
                # NOTE: a previously observed, unexplained error was traced to this comparison;
                # indexing areaFractionThreshold with [0] rather than [x] is a likely cause.
                if areaFractionsArray[x][z] > areaFractionThreshold[0]:
                    summary[chan[0] + '-positive'] += 1
                    if x != 0:
                        temp += 1

            if temp == 0:
                summary['all-negative'] += 1

            if len(channels) > 2:
                if areaFractionsArray[1][z] > areaFractionThreshold[1]:
                    if areaFractionsArray[2][z] > areaFractionThreshold[2]:
                        summary[channels[1][0] + '-' + channels[2][0] + '-positive'] += 1

            if len(channels) > 3:
                if areaFractionsArray[1][z] > areaFractionThreshold[1]:
                    if areaFractionsArray[3][z] > areaFractionThreshold[3]:
                        summary[channels[1][0] + '-' + channels[3][0] + '-positive'] += 1
                if areaFractionsArray[2][z] > areaFractionThreshold[2]:
                    if areaFractionsArray[3][z] > areaFractionThreshold[3]:
                        summary[channels[2][0] + '-' + channels[3][0] + '-positive'] += 1
                        if areaFractionsArray[1][z] > areaFractionThreshold[1]:
                            summary[channels[1][0] + '-' + channels[2][0] + '-' + channels[3][0] + '-positive'] += 1

    # Calculate the average of the particle sizes
    if float(summary['#nuclei']) > 0:
        summary['size-average'] = round(areaCounter / summary['#nuclei'], 2)

    # Opens and appends one line to the final csv file for the subfolder
    # (remember that this is still inside the loop that goes through each image).
    with open(outputDirectory + "/" + outputName + ".csv", 'a') as csvfile:
        writer = csv.DictWriter(csvfile, fieldnames=fieldnames, extrasaction='ignore', lineterminator='\n')
        if os.path.getsize(outputDirectory + "/" + outputName + ".csv") < 1:
            writer.writeheader()
        writer.writerow(summary)

    IJ.run(imp, "Close All", "")
def main():
    # Get the active dataset
    # img = IJ.getImage()
    display = displayservice.getActiveDisplay()
    active_dataset = imagedisplayservice.getActiveDataset(display)
    if not active_dataset:
        IJ.showMessage('No image opened.')
        return

    # Get the image path
    fname = active_dataset.getSource()
    dir_path = os.path.dirname(fname)
    if not fname:
        IJ.showMessage('Source image needs to match a file on the system.')
        return

    # Open ROIs
    rois = RoiManager.getInstance()
    if not rois:
        roi_path = os.path.join(dir_path, "RoiSet.zip")
        if not os.path.isfile(roi_path):
            try:
                roi_path = glob.glob(os.path.join(dir_path, "*.roi"))[0]
            except:
                roi_path = None
        if not roi_path:
            IJ.showMessage('No ROIs. Please use Analyze > Tools > ROI Manager...')
            return
        rois = RoiManager(True)
        rois.reset()
        rois.runCommand("Open", roi_path)

    IJ.log('Image filename is %s' % fname)

    dt = get_dt(active_dataset)

    rois_array = rois.getRoisAsArray()
    for i, roi in enumerate(rois_array):
        crop_id = i + 1
        IJ.log("Cropping %i / %i" % (crop_id, len(rois_array)))

        # Get filename and basename of the current cropped image
        crop_basename = "crop%i_%s" % (crop_id, active_dataset.getName())
        crop_basename = os.path.splitext(crop_basename)[0] + ".ome.tif"
        crop_fname = os.path.join(os.path.dirname(fname), crop_basename)

        # Get bounds and crop
        bounds = roi.getBounds()
        dataset = crop(ij, datasetservice, active_dataset, bounds.x, bounds.y, bounds.width, bounds.height, crop_basename)

        # Show cropped image
        ij.ui().show(dataset.getName(), dataset)

        # Save cropped image (ugly hack)
        IJ.log("Saving crop to %s" % crop_fname)
        imp = IJ.getImage()
        bfExporter = LociExporter()
        macroOpts = "save=[" + crop_fname + "]"
        bfExporter.setup(None, imp)
        Macro.setOptions(macroOpts)
        bfExporter.run(None)
        imp.close()

    IJ.log('Done')
def segmentation(imp, spot_data, channel, diameter_init, ES_tolerance, ES_area_max, ES_ctrl_pts, ES_iteration, repeat_max):
    # Open files
    cal = imp.getCalibration()
    manager = RoiManager.getInstance()
    if manager is None:
        manager = RoiManager()

    # Prepare log files for output
    options = IS.MEDIAN | IS.AREA | IS.MIN_MAX | IS.CENTROID | IS.PERIMETER | IS.ELLIPSE | IS.SKEWNESS
    convergence = []
    Sintensity = []

    for spot in spot_data:
        repeat = 0
        flag = False
        spotID = int(spot[0])
        Xcenter = (float(spot[1]) / cal.pixelWidth)
        Ycenter = (float(spot[2]) / cal.pixelHeight)
        Quality = float(spot[3])
        diameter_init = float(spot[4] / cal.pixelWidth) * 2.0

        while True:
            manager = RoiManager.getInstance()
            if manager is None:
                manager = RoiManager()
            Xcurrent = int(Xcenter - diameter_init / 2.0)
            Ycurrent = int(Ycenter - diameter_init / 2.0)
            Dcurrent1 = int(diameter_init * (1.2 - repeat / 10.0))
            Dcurrent2 = int(diameter_init * (0.8 + repeat / 10.0))
            roi = OvalRoi(Xcurrent, Ycurrent, Dcurrent1, Dcurrent2)
            imp.setPosition(channel)
            imp.setRoi(roi)
            Esnake_options1 = "target_brightness=Bright control_points=" + str(ES_ctrl_pts) + " gaussian_blur=0 "
            Esnake_options2 = "energy_type=Contour alpha=2.0E-5 max_iterations=" + str(ES_iteration) + " immortal=false"
            IJ.run(imp, "E-Snake", Esnake_options1 + Esnake_options2)
            roi_snake = manager.getRoisAsArray()
            roi_ind = len(roi_snake) - 1
            stats = IS.getStatistics(imp.getProcessor(), options, imp.getCalibration())
            perimeter = roi_snake[roi_ind].getLength() * cal.pixelWidth
            circularity = 4.0 * 3.1417 * (stats.area / (perimeter * perimeter))

            if stats.area > 17.0 and stats.area < ES_area_max and stats.skewness < -0.01 and circularity > 0.01 and stats.minor > 2.0 and boundaries(Xcenter, Ycenter, stats.xCentroid / cal.pixelWidth, stats.yCentroid / cal.pixelHeight, ES_tolerance):
                Sintensity = stats.median
                convergence.append(True)
                break
            if stats.median > 6000 and stats.area > 17.0 and stats.area < ES_area_max:
                Sintensity = stats.median
                convergence.append(True)
                break
            elif repeat > repeat_max:
                manager.select(imp, roi_ind)
                manager.runCommand(imp, 'Delete')
                roi = OvalRoi(Xcenter + 1.0 - diameter_init / 2.0,
                              Ycenter + 1.0 - diameter_init / 2.0,
                              diameter_init, diameter_init)
                imp.setRoi(roi)
                manager.add(imp, roi, spotID)
                roi_snake.append(roi)
                stats = IS.getStatistics(imp.getProcessor(), options, imp.getCalibration())
                Sintensity = stats.median
                convergence.append(False)
                break
            else:
                IJ.log('Area=' + str(stats.area) + ' Skewness=' + str(stats.skewness) +
                       ' circularity=' + str(circularity) + ' Minor=' + str(stats.minor))
                manager.select(imp, roi_ind)
                manager.runCommand(imp, 'Delete')
                repeat += 1
        # End Spot-segmentation
    # End all Spots-segmentation

    manager.runCommand(imp, 'Show All')
    imp.setPosition(channel)
    color = imp.createImagePlus()
    ip = imp.getProcessor().duplicate()
    color.setProcessor("segmentation" + str(channel), ip)
    color.show()
    IJ.selectWindow("segmentation" + str(channel))
    manager.moveRoisToOverlay(color)

    spot_optimal = manager.getRoisAsArray()
    manager.reset()
    for i in xrange(0, len(spot_optimal)):
        spot = spot_optimal[i]
        spot.setStrokeWidth(2)
        if convergence[i]:
            spot.setStrokeColor(Color.GREEN)
        else:
            spot.setStrokeColor(Color.MAGENTA)
        imp.setRoi(spot)
        manager.add(imp, spot, i)

    manager.runCommand(imp, 'Show All')
    imp.setPosition(channel)
def channel_segmentation(infile, diameter, tolerance, repeat_max, Zrepeat=10):
    # ROI optimization by E-Snake optimisation
    default_options = "stack_order=XYCZT color_mode=Grayscale view=Hyperstack"
    IJ.run("Bio-Formats Importer", default_options + " open=[" + infile + "]")
    imp = IJ.getImage()
    cal = imp.getCalibration()
    channels = [i for i in xrange(1, imp.getNChannels() + 1)]

    log = filename(infile)
    log = re.sub('.ids', '.csv', log)
    XZdrift, YZdrift = retrieve_Zdrift(log)
    XZpt = [i * imp.getWidth() / Zrepeat for i in xrange(1, Zrepeat - 1)]
    YZpt = [i * imp.getHeight() / Zrepeat for i in xrange(1, Zrepeat - 1)]

    # Prepare the header of the output file for each channel
    for ch in channels:
        csv_name = 'ch' + str(ch) + log
        with open(os.path.join(folder6, csv_name), 'wb') as outfile:
            SegLog = csv.writer(outfile, delimiter=',')
            SegLog.writerow(['spotID', 'Xpos', 'Ypos', 'Zpos', 'Quality',
                             'area', 'intensity', 'min', 'max', 'std'])

    # Retrieve seeds from SpotDetector
    options = IS.MEDIAN | IS.AREA | IS.MIN_MAX | IS.CENTROID
    spots = retrieve_seeds(log)
    for ch in channels:
        for spot in spots:
            repeat = 0
            # Spot positions are given in calibrated units and need to be converted
            # to pixel coordinates.
            spotID = int(spot[0])
            Xcenter = int(float(spot[2]) / cal.pixelWidth)
            Ycenter = int(float(spot[3]) / cal.pixelHeight)
            Zcenter = float(spot[4]) / cal.pixelDepth
            Quality = float(spot[5])

            # Find the closest grid location in the Zdrift matrix
            Xpt = min(range(len(XZpt)), key=lambda i: abs(XZpt[i] - Xcenter))
            Ypt = min(range(len(YZpt)), key=lambda i: abs(YZpt[i] - Ycenter))

            # Calculate the Z position according to SpotZ, the calibration and the
            # channel-specific Zdrift.
            Zshift = median([float(XZdrift[Xpt][ch - 1]), float(YZdrift[Ypt][ch - 1])]) / cal.pixelDepth
            correctZ = int(Zcenter - Zshift)
            imp.setPosition(ch, correctZ, 1)
            imp.getProcessor().setMinAndMax(0, 3000)

            while True:
                manager = RoiManager.getInstance()
                if manager is None:
                    manager = RoiManager()
                roi = OvalRoi(Xcenter - diameter * (1.0 + repeat / 10.0) / 2.0,
                              Ycenter - diameter * (1.0 + repeat / 10.0) / 2.0,
                              diameter * (1.0 + repeat / 10.0),
                              diameter * (1.0 + repeat / 10.0))
                imp.setRoi(roi)
                IJ.run(imp, "E-Snake", "target_brightness=Bright control_points=3 gaussian_blur=0 energy_type=Mixture alpha=2.0E-5 max_iterations=20 immortal=false")
                roi_snake = manager.getRoisAsArray()[0]
                imp.setRoi(roi_snake)
                stats = IS.getStatistics(imp.getProcessor(), options, imp.getCalibration())
                manager.reset()

                if stats.area > 20.0 and stats.area < 150.0 and boundaries(Xcenter, Ycenter, stats.xCentroid / cal.pixelWidth, stats.yCentroid / cal.pixelHeight, tolerance):
                    Sarea = stats.area
                    Sintensity = stats.median
                    Smin = stats.min
                    Smax = stats.max
                    Sstd = stats.stdDev
                    break
                elif repeat > repeat_max:
                    roi = OvalRoi(Xcenter - diameter / 2.0, Ycenter - diameter / 2.0, diameter, diameter)
                    imp.setRoi(roi)
                    manager.add(imp, roi, spotID)  # the original passes 'i', which is undefined here; spotID appears intended
                    stats = IS.getStatistics(imp.getProcessor(), options, imp.getCalibration())
                    Sarea = stats.area
                    Sintensity = stats.median
                    Smin = stats.min
                    Smax = stats.max
                    Sstd = stats.stdDev
                    break
                else:
                    repeat += 1

            # Save results
            csv_name = 'ch' + str(ch) + log
            with open(os.path.join(folder6, csv_name), 'ab') as outfile:
                SegLog = csv.writer(outfile, delimiter=',')
                SegLog.writerow([spotID, Xcenter, Ycenter, correctZ, Quality,
                                 Sarea, Sintensity, Smin, Smax, Sstd])
            # End spot optimization
        # End spots
    # End channels

    IJ.selectWindow(filename(infile))
    IJ.run("Close")
def run():
    IJ.run("Close All", "")
    IJ.log("\\Clear")
    IJ.log("Find_close_peaks")

    imp = IJ.run("Bio-Formats Importer")
    imp = IJ.getImage()

    Channel_1, Channel_2, radius_background, sigmaSmaller, sigmaLarger, minPeakValue, min_dist = getOptions()

    IJ.log("option used:" \
           + "\n" + "channel 1:" + str(Channel_1) \
           + "\n" + "channel 2:" + str(Channel_2) \
           + "\n" + "Radius Background:" + str(radius_background) \
           + "\n" + "Smaller Sigma:" + str(sigmaSmaller) \
           + "\n" + "Larger Sigma:" + str(sigmaLarger) \
           + "\n" + "Min Peak Value:" + str(minPeakValue) \
           + "\n" + "Min dist between peaks:" + str(min_dist))

    IJ.log("Computing Max Intensity Projection")
    if imp.getDimensions()[3] > 1:
        imp_max = ZProjector.run(imp, "max")
        # imp_max = IJ.run("Z Project...", "projection=[Max Intensity]")
        # imp_max = IJ.getImage()
    else:
        imp_max = imp

    ip1, ip2 = extract_channel(imp_max, Channel_1, Channel_2)
    imp1, imp2 = back_substraction(ip1, ip2, radius_background)
    imp1.show()
    imp2.show()

    IJ.log("Finding Peaks")
    ip1_1, ip2_1, peaks_1, peaks_2 = find_peaks(imp1, imp2, sigmaSmaller, sigmaLarger, minPeakValue)

    # Create a PointRoi from the DoG peaks, for visualization
    roi_1 = PointRoi(0, 0)
    roi_2 = PointRoi(0, 0)
    roi_3 = PointRoi(0, 0)
    roi_4 = PointRoi(0, 0)

    # A temporary array of integers, one per dimension the image has
    p_1 = zeros(ip1_1.numDimensions(), 'i')
    p_2 = zeros(ip2_1.numDimensions(), 'i')

    # Load every peak as a point in the PointRoi
    for peak in peaks_1:
        # Read peak coordinates into an array of integers
        peak.localize(p_1)
        roi_1.addPoint(imp1, p_1[0], p_1[1])

    for peak in peaks_2:
        # Read peak coordinates into an array of integers
        peak.localize(p_2)
        roi_2.addPoint(imp2, p_2[0], p_2[1])

    # Choose the minimum distance in pixels
    # min_dist = 20

    for peak_1 in peaks_1:
        peak_1.localize(p_1)
        for peak_2 in peaks_2:
            peak_2.localize(p_2)
            d1 = distance(p_1, p_2)
            if d1 < min_dist:
                roi_3.addPoint(imp1, p_2[0], p_2[1])
                break

    for peak_2 in peaks_2:
        peak_2.localize(p_2)
        for peak_1 in peaks_1:
            peak_1.localize(p_1)
            d2 = distance(p_2, p_1)
            if d2 < min_dist:
                roi_4.addPoint(imp1, p_2[0], p_2[1])
                break

    rm = RoiManager.getInstance()
    if not rm:
        rm = RoiManager()
    rm.reset()

    rm.addRoi(roi_1)
    rm.addRoi(roi_2)
    rm.addRoi(roi_3)
    rm.addRoi(roi_4)

    rm.select(0)
    rm.rename(0, "ROI neuron")
    rm.runCommand("Set Color", "yellow")

    rm.select(1)
    rm.rename(1, "ROI glioma")
    rm.runCommand("Set Color", "blue")

    rm.select(2)
    rm.rename(2, "ROI glioma touching neurons")
    rm.runCommand("Set Color", "red")

    rm.select(3)
    rm.rename(3, "ROI neurons touching glioma")
    rm.runCommand("Set Color", "green")

    rm.runCommand(imp1, "Show All")

    # Convert the distance to um
    cal = imp.getCalibration()
    min_distance = str(round((cal.pixelWidth * min_dist), 1))

    table = ResultsTable()
    table.incrementCounter()
    table.addValue("Numbers of Neuron Markers", roi_1.getCount(0))
    table.addValue("Numbers of Glioma Markers", roi_2.getCount(0))
    table.addValue("Numbers of Glioma within %s um of Neurons" % (min_distance), roi_3.getCount(0))
    table.addValue("Numbers of Neurons within %s um of Glioma" % (min_distance), roi_4.getCount(0))

    table.show("Results Analysis")
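# The run() function above relies on a distance() helper that is not shown in this listing.
# A minimal sketch of what it presumably computes (Euclidean distance between two integer
# coordinate arrays); the original script's implementation may differ.
import math

def distance(p, q):
    # p and q are sequences of pixel coordinates of equal length, e.g. the p_1/p_2 arrays above
    return math.sqrt(sum((a - b) ** 2 for a, b in zip(p, q)))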