def writeCSV(filePath, results, header):
    """Write a table of column data to a csv file via an ImageJ ResultsTable.

    :param filePath: Destination path of the csv file.
    :param results:  List of columns; results[col][row] is one cell value.
    :param header:   One column name per entry of results.
    """
    table = ResultsTable()
    # One table row per entry of the second column (columns are assumed
    # to be of equal length).
    for row in range(len(results[1])):
        table.incrementCounter()
        for col in range(len(results)):
            table.addValue(str(header[col]), results[col][row])
    table.show("Results")
    table.saveAs(filePath)
def writeCSV(filePath, results, header):
    """Write a table as a csv file.

    results holds the data column-wise and header the matching column
    names; the table is displayed as "Results" and saved to filePath.
    """
    rt = ResultsTable()
    n_rows = len(results[1])
    n_cols = len(results)
    for i in range(n_rows):
        rt.incrementCounter()
        for j in range(n_cols):
            rt.addValue(str(header[j]), results[j][i])
    rt.show("Results")
    rt.saveAs(filePath)
def show_as_table(title, data, order=[]):
    """Helper function to display group and data information as a ResultsTable.

    :param title: Window title under which the table is shown.
    :param data:  Iterable of dicts; each dict becomes one table row.
    :param order: Optional list of keys giving the leading column order;
                  keys not listed are appended after it. The list is
                  copied per row and never mutated.
    """
    table = ResultsTable()
    for d in data:
        table.incrementCounter()
        # Copy so neither the caller's list nor the shared default is
        # mutated, and keys do not accumulate across rows.
        row_order = [k for k in order]
        # BUG FIX: the original tested "not d in order" (the whole dict),
        # which is always true, so all keys were re-appended every row;
        # test the individual key instead.
        row_order.extend([k for k in d.keys() if k not in row_order])
        for k in row_order:
            table.addValue(k, d[k])
    table.show(title)
def myResults(results):
    """Tabulate edge length, endpoint distance and tortuosity for every
    edge of every graph in the skeleton analysis results and show the
    table as 'samiSkel_results'."""
    table = ResultsTable()
    for graph_id, graph in enumerate(results.getGraph()):
        for edge in graph.getEdges():
            length_3d = edge.getLength()
            endpoint_dist = euclideanDistance(edge.getV1(), edge.getV2())
            # One table row per edge.
            table.incrementCounter()
            table.addValue('graphID', graph_id)
            table.addValue('length_3d', length_3d)
            table.addValue('dist', endpoint_dist)
            # Tortuosity is undefined when the endpoints coincide (loops).
            if endpoint_dist > 0:
                table.addValue('tort', length_3d / endpoint_dist)
            else:
                table.addValue('tort', 'inf')
    table.setPrecision(6)
    table.show('samiSkel_results')
def run(imp, preprocessor_path, postprocessor_path, threshold_method,
        user_comment):
    """Run the mitochondrial morphology analysis on ``imp``.

    :param imp: Input ImagePlus to analyze.
    :param preprocessor_path: Optional script File run on the image before
        analysis (None or a missing file skips the step).
    :param postprocessor_path: Optional script File run after the analysis.
    :param threshold_method: Name of the ops auto-threshold method.
    :param user_comment: Free text appended to the table; "a=b,c=d" pairs
        become individual columns.

    Results are appended to the "Mito Morphology" ResultsTable; a 2D image
    also receives a binary/skeleton overlay and a stack is rendered in an
    Image3DUniverse. Relies on the script parameters use_ridge_detection,
    rd_max, rd_min, rd_width and rd_length defined at module level.
    """
    # Template of every reported value (the types are placeholders that
    # get overwritten), plus the column order used when writing the table.
    output_parameters = {
        "image title": "",
        "preprocessor path": float,
        "post processor path": float,
        "thresholding op": float,
        "use ridge detection": bool,
        "high contrast": int,
        "low contrast": int,
        "line width": int,
        "minimum line length": int,
        "mitochondrial footprint": float,
        "branch length mean": float,
        "branch length median": float,
        "branch length stdevp": float,
        "summed branch lengths mean": float,
        "summed branch lengths median": float,
        "summed branch lengths stdevp": float,
        "network branches mean": float,
        "network branches median": float,
        "network branches stdevp": float
    }

    output_order = [
        "image title", "preprocessor path", "post processor path",
        "thresholding op", "use ridge detection", "high contrast",
        "low contrast", "line width", "minimum line length",
        "mitochondrial footprint", "branch length mean",
        "branch length median", "branch length stdevp",
        "summed branch lengths mean", "summed branch lengths median",
        "summed branch lengths stdevp", "network branches mean",
        "network branches median", "network branches stdevp"
    ]

    # Perform any preprocessing steps...
    status.showStatus("Preprocessing image...")
    if preprocessor_path != None:
        if preprocessor_path.exists():
            preprocessor_thread = scripts.run(preprocessor_path, True)
            preprocessor_thread.get()
            imp = WindowManager.getCurrentImage()
    else:
        pass

    # Store all of the analysis parameters in the table
    if preprocessor_path == None:
        preprocessor_str = ""
    else:
        preprocessor_str = preprocessor_path.getCanonicalPath()
    if postprocessor_path == None:
        postprocessor_str = ""
    else:
        # BUG FIX: this previously read preprocessor_path.getCanonicalPath(),
        # so the "post processor path" column always showed the
        # preprocessor's path instead.
        postprocessor_str = postprocessor_path.getCanonicalPath()

    output_parameters["preprocessor path"] = preprocessor_str
    output_parameters["post processor path"] = postprocessor_str
    output_parameters["thresholding op"] = threshold_method
    output_parameters["use ridge detection"] = str(use_ridge_detection)
    output_parameters["high contrast"] = rd_max
    output_parameters["low contrast"] = rd_min
    output_parameters["line width"] = rd_width
    output_parameters["minimum line length"] = rd_length

    # Create and ImgPlus copy of the ImagePlus for thresholding with ops...
    status.showStatus("Determining threshold level...")
    imp_title = imp.getTitle()
    slices = imp.getNSlices()
    frames = imp.getNFrames()
    output_parameters["image title"] = imp_title
    imp_calibration = imp.getCalibration()
    imp_channel = Duplicator().run(imp, imp.getChannel(), imp.getChannel(),
                                   1, slices, 1, frames)
    img = ImageJFunctions.wrap(imp_channel)

    # Determine the threshold value if not manual...
    binary_img = ops.run("threshold.%s" % threshold_method, img)
    binary = ImageJFunctions.wrap(binary_img, 'binary')
    binary.setCalibration(imp_calibration)
    binary.setDimensions(1, slices, 1)

    # Get the total_area
    if binary.getNSlices() == 1:
        area = binary.getStatistics(Measurements.AREA).area
        area_fraction = binary.getStatistics(
            Measurements.AREA_FRACTION).areaFraction
        output_parameters[
            "mitochondrial footprint"] = area * area_fraction / 100.0
    else:
        mito_footprint = 0.0
        # NOTE(review): ImageJ slice numbers are 1-based but this loop
        # visits 0..n-1; preserved as-is — confirm against original intent.
        for slice in range(binary.getNSlices()):
            binary.setSliceWithoutUpdate(slice)
            area = binary.getStatistics(Measurements.AREA).area
            area_fraction = binary.getStatistics(
                Measurements.AREA_FRACTION).areaFraction
            mito_footprint += area * area_fraction / 100.0
        output_parameters[
            "mitochondrial footprint"] = mito_footprint * imp_calibration.pixelDepth

    # Generate skeleton from masked binary ...
    # Generate ridges first if using Ridge Detection
    if use_ridge_detection and (imp.getNSlices() == 1):
        skeleton = ridge_detect(imp, rd_max, rd_min, rd_width, rd_length)
    else:
        skeleton = Duplicator().run(binary)
        IJ.run(skeleton, "Skeletonize (2D/3D)", "")

    # Analyze the skeleton...
    status.showStatus("Setting up skeleton analysis...")
    skel = AnalyzeSkeleton_()
    skel.setup("", skeleton)
    status.showStatus("Analyzing skeleton...")
    skel_result = skel.run()

    status.showStatus("Computing graph based parameters...")
    branch_lengths = []
    summed_lengths = []
    graphs = skel_result.getGraph()
    for graph in graphs:
        summed_length = 0.0
        edges = graph.getEdges()
        for edge in edges:
            length = edge.getLength()
            branch_lengths.append(length)
            summed_length += length
        summed_lengths.append(summed_length)

    output_parameters["branch length mean"] = eztables.statistical.average(
        branch_lengths)
    output_parameters["branch length median"] = eztables.statistical.median(
        branch_lengths)
    output_parameters["branch length stdevp"] = eztables.statistical.stdevp(
        branch_lengths)
    output_parameters[
        "summed branch lengths mean"] = eztables.statistical.average(
            summed_lengths)
    output_parameters[
        "summed branch lengths median"] = eztables.statistical.median(
            summed_lengths)
    output_parameters[
        "summed branch lengths stdevp"] = eztables.statistical.stdevp(
            summed_lengths)
    branches = list(skel_result.getBranches())
    output_parameters["network branches mean"] = eztables.statistical.average(
        branches)
    output_parameters["network branches median"] = eztables.statistical.median(
        branches)
    output_parameters["network branches stdevp"] = eztables.statistical.stdevp(
        branches)

    # Create/append results to a ResultsTable...
    status.showStatus("Display results...")
    if "Mito Morphology" in list(WindowManager.getNonImageTitles()):
        rt = WindowManager.getWindow(
            "Mito Morphology").getTextPanel().getOrCreateResultsTable()
    else:
        rt = ResultsTable()
    rt.incrementCounter()
    for key in output_order:
        rt.addValue(key, str(output_parameters[key]))

    # Add user comments intelligently
    if user_comment != None and user_comment != "":
        if "=" in user_comment:
            comments = user_comment.split(",")
            for comment in comments:
                rt.addValue(comment.split("=")[0], comment.split("=")[1])
        else:
            rt.addValue("Comment", user_comment)
    rt.show("Mito Morphology")

    # Create overlays on the original ImagePlus and display them if 2D...
    if imp.getNSlices() == 1:
        status.showStatus("Generate overlays...")
        IJ.run(skeleton, "Green", "")
        IJ.run(binary, "Magenta", "")
        skeleton_ROI = ImageRoi(0, 0, skeleton.getProcessor())
        skeleton_ROI.setZeroTransparent(True)
        skeleton_ROI.setOpacity(1.0)
        binary_ROI = ImageRoi(0, 0, binary.getProcessor())
        binary_ROI.setZeroTransparent(True)
        binary_ROI.setOpacity(0.25)
        overlay = Overlay()
        overlay.add(binary_ROI)
        overlay.add(skeleton_ROI)
        imp.setOverlay(overlay)
        imp.updateAndDraw()

    # Generate a 3D model if a stack
    if imp.getNSlices() > 1:
        univ = Image3DUniverse()
        univ.show()
        pixelWidth = imp_calibration.pixelWidth
        pixelHeight = imp_calibration.pixelHeight
        pixelDepth = imp_calibration.pixelDepth

        # Add end points in yellow
        end_points = skel_result.getListOfEndPoints()
        end_point_list = []
        for p in end_points:
            end_point_list.append(
                Point3f(p.x * pixelWidth, p.y * pixelHeight, p.z * pixelDepth))
        univ.addIcospheres(end_point_list, Color3f(255.0, 255.0, 0.0), 2,
                           1 * pixelDepth, "endpoints")

        # Add junctions in magenta
        junctions = skel_result.getListOfJunctionVoxels()
        junction_list = []
        for p in junctions:
            junction_list.append(
                Point3f(p.x * pixelWidth, p.y * pixelHeight, p.z * pixelDepth))
        univ.addIcospheres(junction_list, Color3f(255.0, 0.0, 255.0), 2,
                           1 * pixelDepth, "junctions")

        # Add the lines in green
        graphs = skel_result.getGraph()
        for graph in range(len(graphs)):
            edges = graphs[graph].getEdges()
            for edge in range(len(edges)):
                branch_points = []
                for p in edges[edge].getSlabs():
                    branch_points.append(
                        Point3f(p.x * pixelWidth, p.y * pixelHeight,
                                p.z * pixelDepth))
                univ.addLineMesh(branch_points, Color3f(0.0, 255.0, 0.0),
                                 "branch-%s-%s" % (graph, edge), True)

        # Add the surface
        univ.addMesh(binary)
        univ.getContent("binary").setTransparency(0.5)

    # Perform any postprocessing steps...
    status.showStatus("Running postprocessing...")
    if postprocessor_path != None:
        if postprocessor_path.exists():
            postprocessor_thread = scripts.run(postprocessor_path, True)
            postprocessor_thread.get()
    else:
        pass

    status.showStatus("Done analysis!")
def batch_open_Rois(pathRoi, file_typeRoi=None, name_filterRoi=None, recursive=False):
    '''Open all Roi files in the given folder and tabulate their XY points.

    :param pathRoi: The path from were to open the Rois. String and
                    java.io.File are allowed.
    :param file_typeRoi: Only accept files with the given extension, or any
                    of a list/tuple of extensions (default: None = accept all).
    :param name_filterRoi: Reject files that contain the given string.
                    NOTE(review): currently unused by the implementation.
    :param recursive: Process directories recursively (default: False).
    :returns: The array of Rois held by the RoiManager after opening.
    '''
    # Converting a File object to a string.
    if isinstance(pathRoi, File):
        pathRoi = pathRoi.getAbsolutePath()

    def check_type(file_name):
        '''Return True when file_name matches the file_typeRoi filter.

        file_typeRoi may be a single extension string or a list/tuple of
        extension strings; None accepts everything.
        '''
        if file_typeRoi:
            # The first branch is used if file_type is a list or a tuple.
            if isinstance(file_typeRoi, (list, tuple)):
                for file_type_ in file_typeRoi:
                    if file_name.endswith(file_type_):
                        return True
            # The second branch is used if file_type is a string.
            # BUG FIX: the original tested isinstance(file_typeRoi, string),
            # where "string" was the file name being checked, not a type —
            # every single-string filter raised a TypeError.
            elif isinstance(file_typeRoi, str):
                return file_name.endswith(file_typeRoi)
            return False
        # Accept all files if file_type is None.
        return True

    # We collect all files to open in a list.
    path_to_Roi = []
    # Replacing some abbreviations (e.g. $HOME on Linux).
    # NOTE(review): the expanded value is not used below (listdir/walk use
    # the raw pathRoi); preserved for backward compatibility.
    path = os.path.expanduser(pathRoi)

    # If we don't want a recursive search, we can use os.listdir().
    if not recursive:
        for file_name in os.listdir(pathRoi):
            full_path = os.path.join(pathRoi, file_name)
            if os.path.isfile(full_path):
                if check_type(file_name):
                    path_to_Roi.append(full_path)
    # For a recursive search os.walk() is used.
    else:
        # Each iteration processes a different directory; only the file
        # names (third return value) are of interest here.
        for directory, dir_names, file_names in os.walk(pathRoi):
            for file_name in file_names:
                # The full path needs to be reconstructed from the
                # current directory and the bare file name.
                full_path = os.path.join(directory, file_name)
                if check_type(file_name):
                    # NOTE(review): the recursive branch stores
                    # [path, basename] pairs while the flat branch stores
                    # bare path strings; IJ.open below expects a path.
                    # Preserved as in the original.
                    path_to_Roi.append([
                        full_path,
                        os.path.basename(os.path.splitext(full_path)[0])
                    ])

    RoisX = []
    RoisY = []
    print('path', path_to_Roi)
    # Open every Roi file into the (singleton) RoiManager.
    for roi_path in path_to_Roi:
        print('path', roi_path)
        # An object equals True and None equals False.
        rm = RoiManager.getInstance()
        if (rm == None):
            rm = RoiManager()
        Roi = IJ.open(roi_path)
    # Tabulate the polygon vertices of every Roi now in the manager.
    roi_points = rm.getRoisAsArray()
    table = ResultsTable()
    for Roi in roi_points:
        xpoints = Roi.getPolygon().xpoints
        ypoints = Roi.getPolygon().ypoints
        for i in range(len(xpoints)):
            table.incrementCounter()
            table.addValue("Index", i)
            table.addValue("X", xpoints[i])
            table.addValue("Y", ypoints[i])
    table.show("XY-Coordinates")
    return roi_points
print MeanChannel1 print MeanChannel2 print MeanChannel3 print NormChannel1 print NormChannel2 print NormChannel3 print XYCoordinates print Distance print Velocity ort = ResultsTable() ort.setPrecision(3) print ort.getCounter count = len(MeanChannel1) for i in range(count): ort.incrementCounter() ort.addValue("Frame", i) ort.addValue("Channel 1", MeanChannel1[i]) ort.addValue("Channel 2", MeanChannel2[i]) if threecolour: ort.addValue("Channel 3", MeanChannel3[i]) ort.addValue("NormCh 1", NormChannel1[i]) ort.addValue("NormCh 2", NormChannel2[i]) if threecolour: ort.addValue("NormCh 3", NormChannel3[i]) ort.addValue("XY coordinates", str(XYCoordinates[i])) ort.addValue("Distance in um", str((Distance[i]*Pixelsize))) ort.addValue("Velocity in um/s", str((Velocity[i]*Pixelsize))) ort.show("Measured intensities")
def runScript():
    """Read trajectory rows from the input results table, measure the
    intensity around each (x, y, z, frame) position of the current image,
    and report the values as csv-style prints and/or an output table.

    Uses module-level settings: inputTableName, showOutputTable,
    outputTableName, printOutputData and radius.
    """
    # find table with trajectories
    rt = findResultsTable(inputTableName)
    if rt == None:
        print("Results table window titled [" + inputTableName + "] not found!")
        return

    # get input image and its properties
    img = WindowManager.getCurrentImage()
    if img == None:
        print("Could not access input image!")
        return
    print("Processing image:", img)
    xLen = img.getWidth()
    yLen = img.getHeight()
    zLen = img.getNSlices()
    noOfFrames = img.getNFrames()
    noOfChannels = img.getNChannels()
    stack = img.getStack()
    if (noOfChannels > 1):
        print(
            "Cannot process images with channels. Convert image to single channel first!"
        )
        return

    # Start processin data row by row...
    numOfRows = rt.getCounter()
    # NOTE(review): a table with exactly one row is silently skipped
    # (condition is "> 1", not "> 0") — confirm this is intended.
    if numOfRows > 1:
        #create output tableName
        if showOutputTable:
            outputTable = ResultsTable()

        # if output in csv format requested print header
        if printOutputData:
            print("trajectory;frame;m0;sizeInPixels;avgIntensity")

        for idx in range(0, numOfRows):
            trajectoryId = rt.getValue("Trajectory", idx)
            x = rt.getValue("x", idx)
            y = rt.getValue("y", idx)
            z = rt.getValue("z", idx)
            frame = rt.getValue("Frame", idx)
            m0, size, avgInt = getIntensityData(stack, radius, frame, x, y, z,
                                                xLen, yLen, zLen)
            # if output in csv format requested print it
            if printOutputData:
                print(
                    str(int(trajectoryId)) + ";" + str(frame) + ";" + str(m0) +
                    ";" + str(size) + ";" + str(avgInt))
            # if output table requested update it with data
            if showOutputTable:
                outputTable.incrementCounter()
                outputTable.addValue("", idx + 1)
                outputTable.addValue("trajectory", int(trajectoryId))
                outputTable.addValue("frame", frame)
                outputTable.addValue("m0", m0)
                outputTable.addValue("sizeInPixels", size)
                outputTable.addValue("avgIntensity", avgInt)
        if showOutputTable:
            outputTable.show(outputTableName)
def process(srcDir, dstDir, currentDir, fileName, keepDirectories, Channel_1,
            Channel_2, radius_background, sigmaSmaller, sigmaLarger,
            minPeakValue, min_dist):
    """Count DoG peaks in two channels of one image and their mutual
    proximity, then save the counts (and the log) under dstDir.

    :param srcDir/dstDir: Source and destination roots for saving.
    :param currentDir/fileName: Location of the image to process.
    :param keepDirectories: Mirror the source directory layout when True.
    :param Channel_1/Channel_2: Channel indices handed to extract_channel.
    :param radius_background: Rolling-ball radius for back_substraction.
    :param sigmaSmaller/sigmaLarger/minPeakValue: DoG peak parameters.
    :param min_dist: Maximum pixel distance for a cross-channel match.
    """
    IJ.run("Close All", "")

    # Opening the image
    IJ.log("Open image file:" + fileName)
    imp = BF.openImagePlus(os.path.join(currentDir, fileName))
    imp = imp[0]

    # getDimensions(width, height, channels, slices, frames)
    IJ.log("Computing Max Intensity Projection")
    if imp.getDimensions()[3] > 1:
        imp_max = ZProjector.run(imp, "max")
    else:
        imp_max = imp

    ip1, ip2 = extract_channel(imp_max, Channel_1, Channel_2)

    IJ.log("Substract background")
    imp1, imp2 = back_substraction(ip1, ip2, radius_background)

    IJ.log("Finding Peaks")
    ip1_1, ip2_1, peaks_1, peaks_2 = find_peaks(imp1, imp2, sigmaSmaller,
                                                sigmaLarger, minPeakValue)

    # Create a PointRoi from the DoG peaks, for visualization
    roi_1 = PointRoi(0, 0)
    roi_2 = PointRoi(0, 0)
    roi_3 = PointRoi(0, 0)
    roi_4 = PointRoi(0, 0)

    # A temporary array of integers, one per dimension the image has
    p_1 = zeros(ip1_1.numDimensions(), 'i')
    p_2 = zeros(ip2_1.numDimensions(), 'i')

    # Load every peak as a point in the PointRoi
    for peak in peaks_1:
        # Read peak coordinates into an array of integers
        peak.localize(p_1)
        roi_1.addPoint(imp1, p_1[0], p_1[1])
    for peak in peaks_2:
        peak.localize(p_2)
        roi_2.addPoint(imp2, p_2[0], p_2[1])

    # Mark channel-1 peaks that have a channel-2 peak within min_dist px
    # (and vice versa); break after the first match so each peak counts once.
    for peak_1 in peaks_1:
        peak_1.localize(p_1)
        for peak_2 in peaks_2:
            peak_2.localize(p_2)
            d1 = distance(p_1, p_2)
            if d1 < min_dist:
                roi_3.addPoint(imp1, p_2[0], p_2[1])
                break
    for peak_2 in peaks_2:
        peak_2.localize(p_2)
        for peak_1 in peaks_1:
            peak_1.localize(p_1)
            d2 = distance(p_2, p_1)
            if d2 < min_dist:
                roi_4.addPoint(imp1, p_2[0], p_2[1])
                break

    # Convert the pixel distance to calibrated units for the column labels.
    cal = imp.getCalibration()
    min_distance = str(round((cal.pixelWidth * min_dist), 1))

    table = ResultsTable()
    table.incrementCounter()
    table.addValue("Numbers of Neuron Markers", roi_1.getCount(0))
    table.addValue("Numbers of Glioma Markers", roi_2.getCount(0))
    table.addValue("Numbers of Glioma within %s um of Neurons" % (min_distance),
                   roi_3.getCount(0))
    table.addValue("Numbers of Neurons within %s um of Glioma" % (min_distance),
                   roi_4.getCount(0))

    saveDir = currentDir.replace(srcDir, dstDir) if keepDirectories else dstDir
    if not os.path.exists(saveDir):
        os.makedirs(saveDir)
    IJ.log("Saving to" + saveDir)
    table.save(os.path.join(saveDir, fileName + ".csv"))
    IJ.selectWindow("Log")
    # BUG FIX: the log was previously saved to fileName + ".csv", which
    # overwrote the results table written just above; save it as .txt.
    IJ.saveAs("Text", os.path.join(saveDir, fileName + ".txt"))
def run(imp, preprocessor_path, postprocessor_path, threshold_method, user_comment):
    """Run the mitochondrial morphology analysis on imp.

    imp: input ImagePlus. preprocessor_path / postprocessor_path: optional
    script Files run before/after the analysis (None or missing file skips
    the step). threshold_method: ops auto-threshold name. user_comment:
    free text appended to the table; "a=b,c=d" pairs become columns.
    Results go to the "Mito Morphology" ResultsTable; 2D images get a
    binary/skeleton overlay and stacks are rendered in an Image3DUniverse.
    Relies on script parameters use_ridge_detection, rd_max, rd_min,
    rd_width and rd_length defined at module level.
    """
    # Template of reported values (types are placeholders) and the
    # column order used when writing the table.
    output_parameters = {"image title" : "",
    "preprocessor path" : float,
    "post processor path" : float,
    "thresholding op" : float,
    "use ridge detection" : bool,
    "high contrast" : int,
    "low contrast" : int,
    "line width" : int,
    "minimum line length" : int,
    "mitochondrial footprint" : float,
    "branch length mean" : float,
    "branch length median" : float,
    "branch length stdevp" : float,
    "summed branch lengths mean" : float,
    "summed branch lengths median" : float,
    "summed branch lengths stdevp" : float,
    "network branches mean" : float,
    "network branches median" : float,
    "network branches stdevp" : float}

    output_order = ["image title",
    "preprocessor path",
    "post processor path",
    "thresholding op",
    "use ridge detection",
    "high contrast",
    "low contrast",
    "line width",
    "minimum line length",
    "mitochondrial footprint",
    "branch length mean",
    "branch length median",
    "branch length stdevp",
    "summed branch lengths mean",
    "summed branch lengths median",
    "summed branch lengths stdevp",
    "network branches mean",
    "network branches median",
    "network branches stdevp"]

    # Perform any preprocessing steps...
    status.showStatus("Preprocessing image...")
    if preprocessor_path != None:
        if preprocessor_path.exists():
            preprocessor_thread = scripts.run(preprocessor_path, True)
            preprocessor_thread.get()
            imp = WindowManager.getCurrentImage()
    else:
        pass

    # Store all of the analysis parameters in the table
    if preprocessor_path == None:
        preprocessor_str = ""
    else:
        preprocessor_str = preprocessor_path.getCanonicalPath()
    if postprocessor_path == None:
        postprocessor_str = ""
    else:
        # BUG FIX: previously read preprocessor_path.getCanonicalPath(),
        # so the post processor column always showed the preprocessor path.
        postprocessor_str = postprocessor_path.getCanonicalPath()

    output_parameters["preprocessor path"] = preprocessor_str
    output_parameters["post processor path"] = postprocessor_str
    output_parameters["thresholding op"] = threshold_method
    output_parameters["use ridge detection"] = str(use_ridge_detection)
    output_parameters["high contrast"] = rd_max
    output_parameters["low contrast"] = rd_min
    output_parameters["line width"] = rd_width
    output_parameters["minimum line length"] = rd_length

    # Create and ImgPlus copy of the ImagePlus for thresholding with ops...
    status.showStatus("Determining threshold level...")
    imp_title = imp.getTitle()
    slices = imp.getNSlices()
    frames = imp.getNFrames()
    output_parameters["image title"] = imp_title
    imp_calibration = imp.getCalibration()
    imp_channel = Duplicator().run(imp, imp.getChannel(), imp.getChannel(), 1, slices, 1, frames)
    img = ImageJFunctions.wrap(imp_channel)

    # Determine the threshold value if not manual...
    binary_img = ops.run("threshold.%s"%threshold_method, img)
    binary = ImageJFunctions.wrap(binary_img, 'binary')
    binary.setCalibration(imp_calibration)
    binary.setDimensions(1, slices, 1)

    # Get the total_area
    if binary.getNSlices() == 1:
        area = binary.getStatistics(Measurements.AREA).area
        area_fraction = binary.getStatistics(Measurements.AREA_FRACTION).areaFraction
        output_parameters["mitochondrial footprint"] = area * area_fraction / 100.0
    else:
        mito_footprint = 0.0
        # NOTE(review): ImageJ slices are 1-based but this loop visits
        # 0..n-1; preserved as-is — confirm against original intent.
        for slice in range(binary.getNSlices()):
            binary.setSliceWithoutUpdate(slice)
            area = binary.getStatistics(Measurements.AREA).area
            area_fraction = binary.getStatistics(Measurements.AREA_FRACTION).areaFraction
            mito_footprint += area * area_fraction / 100.0
        output_parameters["mitochondrial footprint"] = mito_footprint * imp_calibration.pixelDepth

    # Generate skeleton from masked binary ...
    # Generate ridges first if using Ridge Detection
    if use_ridge_detection and (imp.getNSlices() == 1):
        skeleton = ridge_detect(imp, rd_max, rd_min, rd_width, rd_length)
    else:
        skeleton = Duplicator().run(binary)
        IJ.run(skeleton, "Skeletonize (2D/3D)", "")

    # Analyze the skeleton...
    status.showStatus("Setting up skeleton analysis...")
    skel = AnalyzeSkeleton_()
    skel.setup("", skeleton)
    status.showStatus("Analyzing skeleton...")
    skel_result = skel.run()

    status.showStatus("Computing graph based parameters...")
    branch_lengths = []
    summed_lengths = []
    graphs = skel_result.getGraph()
    for graph in graphs:
        summed_length = 0.0
        edges = graph.getEdges()
        for edge in edges:
            length = edge.getLength()
            branch_lengths.append(length)
            summed_length += length
        summed_lengths.append(summed_length)

    output_parameters["branch length mean"] = eztables.statistical.average(branch_lengths)
    output_parameters["branch length median"] = eztables.statistical.median(branch_lengths)
    output_parameters["branch length stdevp"] = eztables.statistical.stdevp(branch_lengths)
    output_parameters["summed branch lengths mean"] = eztables.statistical.average(summed_lengths)
    output_parameters["summed branch lengths median"] = eztables.statistical.median(summed_lengths)
    output_parameters["summed branch lengths stdevp"] = eztables.statistical.stdevp(summed_lengths)
    branches = list(skel_result.getBranches())
    output_parameters["network branches mean"] = eztables.statistical.average(branches)
    output_parameters["network branches median"] = eztables.statistical.median(branches)
    output_parameters["network branches stdevp"] = eztables.statistical.stdevp(branches)

    # Create/append results to a ResultsTable...
    status.showStatus("Display results...")
    if "Mito Morphology" in list(WindowManager.getNonImageTitles()):
        rt = WindowManager.getWindow("Mito Morphology").getTextPanel().getOrCreateResultsTable()
    else:
        rt = ResultsTable()
    rt.incrementCounter()
    for key in output_order:
        rt.addValue(key, str(output_parameters[key]))

    # Add user comments intelligently
    if user_comment != None and user_comment != "":
        if "=" in user_comment:
            comments = user_comment.split(",")
            for comment in comments:
                rt.addValue(comment.split("=")[0], comment.split("=")[1])
        else:
            rt.addValue("Comment", user_comment)
    rt.show("Mito Morphology")

    # Create overlays on the original ImagePlus and display them if 2D...
    if imp.getNSlices() == 1:
        status.showStatus("Generate overlays...")
        IJ.run(skeleton, "Green", "")
        IJ.run(binary, "Magenta", "")
        skeleton_ROI = ImageRoi(0,0,skeleton.getProcessor())
        skeleton_ROI.setZeroTransparent(True)
        skeleton_ROI.setOpacity(1.0)
        binary_ROI = ImageRoi(0,0,binary.getProcessor())
        binary_ROI.setZeroTransparent(True)
        binary_ROI.setOpacity(0.25)
        overlay = Overlay()
        overlay.add(binary_ROI)
        overlay.add(skeleton_ROI)
        imp.setOverlay(overlay)
        imp.updateAndDraw()

    # Generate a 3D model if a stack
    if imp.getNSlices() > 1:
        univ = Image3DUniverse()
        univ.show()
        pixelWidth = imp_calibration.pixelWidth
        pixelHeight = imp_calibration.pixelHeight
        pixelDepth = imp_calibration.pixelDepth

        # Add end points in yellow
        end_points = skel_result.getListOfEndPoints()
        end_point_list = []
        for p in end_points:
            end_point_list.append(Point3f(p.x * pixelWidth, p.y * pixelHeight, p.z * pixelDepth))
        univ.addIcospheres(end_point_list, Color3f(255.0, 255.0, 0.0), 2, 1*pixelDepth, "endpoints")

        # Add junctions in magenta
        junctions = skel_result.getListOfJunctionVoxels()
        junction_list = []
        for p in junctions:
            junction_list.append(Point3f(p.x * pixelWidth, p.y * pixelHeight, p.z * pixelDepth))
        univ.addIcospheres(junction_list, Color3f(255.0, 0.0, 255.0), 2, 1*pixelDepth, "junctions")

        # Add the lines in green
        graphs = skel_result.getGraph()
        for graph in range(len(graphs)):
            edges = graphs[graph].getEdges()
            for edge in range(len(edges)):
                branch_points = []
                for p in edges[edge].getSlabs():
                    branch_points.append(Point3f(p.x * pixelWidth, p.y * pixelHeight, p.z * pixelDepth))
                univ.addLineMesh(branch_points, Color3f(0.0, 255.0, 0.0), "branch-%s-%s"%(graph, edge), True)

        # Add the surface
        univ.addMesh(binary)
        univ.getContent("binary").setTransparency(0.5)

    # Perform any postprocessing steps...
    status.showStatus("Running postprocessing...")
    if postprocessor_path != None:
        if postprocessor_path.exists():
            postprocessor_thread = scripts.run(postprocessor_path, True)
            postprocessor_thread.get()
    else:
        pass

    status.showStatus("Done analysis!")
# Normalization factor for I: magnitude of the vector of its extremes.
I_mx = max(I)
I_mn = min(I)
maxi = math.sqrt(I_mx * I_mx + I_mn * I_mn)
#ig = [I_mx, I_mna]
#maxi = max(ig)
# Same normalization for T.
T_mx = max(T)
T_mn = min(T)
maxiT = math.sqrt(T_mx * T_mx + T_mn * T_mn)
#iT = [T_mx, T_mna]
#maxiT = max(iT)
# Scale I and T into X/Y direction components (Py2/Jython map -> list).
Xdir = map(lambda x: x / maxi, I)
Ydir = map(lambda x: x / maxiT, T)
#calculate z direction from X and Y. Assumes that lights are all equidistant from image center, e.g. a "dome" configuration
z = [math.sqrt(a * a + b * b) for a, b in zip(Xdir, Ydir)]
# Z component of the unit direction: sqrt(1 - (x^2 + y^2)); abs() guards
# against small negative values introduced by rounding.
zt = map(lambda x: math.sqrt(abs(1 - x * x)), z)
#write results to table
table = ResultsTable()
for i in range(len(Xdir)):
    table.incrementCounter()
    table.addValue('X', Xdir[i])
    table.addValue('Y', Ydir[i])
    table.addValue('Z', zt[i])
table.show('Results')
# Count maxima via the histogram of the find-maxima output image:
# maxima pixels are drawn at value 255, so bin 255 holds the count.
maximaip = findmaximashow.getProcessor()
maximahist = maximaip.getHistogram()
CountMethod2 = maximahist[255]
print "Using the findMaxima method with a threshold of " + str(
    THRESH) + ", I found " + str(CountMethod2) + " maxima."
# Turn the 255-valued maxima pixels into a selection and keep it in the
# ROI manager for inspection.
IJ.setRawThreshold(findmaximashow, 255, 255, "red")
IJ.run(findmaximashow, "Create Selection", "")
rm.addRoi(findmaximashow.getRoi())
rm.rename(1, "Maxima with threshold")

# ---- METHOD 3-4: API getMaxima WITH THE POINTS COUNTED OR ADDED TO THE RESULTS TABLE
maxima = mf.getMaxima(ip, 100.0, False)
CountMethod3 = maxima.npoints
print "Using the getMaxima method with no threshold , I found " + str(
    CountMethod3) + " maxima."
# One table row per maximum, then count the rows as a cross-check.
rt = ResultsTable()
for i in range(maxima.npoints):
    rt.incrementCounter()
    rt.addValue("X", maxima.xpoints[i])
    rt.addValue("Y", maxima.ypoints[i])
rt.show("Results")
CountMethod4 = rt.getCounter()
print "By counting points generated by getMaxima, I found " + str(
    CountMethod4) + " maxima."
# TODO: add these to the ROI mgr
# TODO: as alternative, check out HMaxima transform from landini, basically subtracting the threshold before running the find maxima
IJ.run("Clear Results", "") #generating results table ort = ResultsTable() ort.setPrecision(1) imp_measure = ExtractChannel(imp1, Measure_Channel) imp_measure.show() for i, roi in enumerate(RoiManager.getInstance().getRoisAsArray()): roi2 = rm.getRoiIndex(roi) rm.select(imp_measure, roi2) stats = imp_measure.getStatistics(Measurements.MEAN | Measurements.AREA | Measurements.FERET | Measurements.CENTROID) ort.incrementCounter() ort.addValue("Compartment", (channel2_name)) ort.addValue("Mean intensity", str(stats.mean)) ort.addValue("Area", str(stats.area)) ort.addValue("X_Coordinate", str(stats.xCentroid)) ort.addValue("Y_Coordinate", str(stats.yCentroid)) time.sleep(0.5) rm.runCommand("reset") # segment and measure second Compartment imp3 = Generate_segmented_image(imp1, 3) # # # # Generate ROIs by "Analyse Particles" IJ.run(imp3, "Analyze Particles...", "size=5-Infinity pixel add exclude stack")
def setUp(self):
    """Build the spot-table fixture: six TrackMate-style rows covering
    three tracks over three frames."""
    unittest.TestCase.setUp(self)
    # (label, id, track_id, quality, x, y, z, t) for each spot row.
    rows = [
        ('ID00002', 2, 2, 1, 738.9, 670.0, 0, 0),
        ('ID00003', 3, 3, 1, 672.1, 729.3, 0, 0),
        ('ID00001', 31, 1, 1, 953.2, 803.5, 0, 1),
        ('ID000032', 32, 2, 1, 739.5, 665.0, 0, 1),
        ('ID000033', 33, 3, 1, 667.0, 729.8, 0, 1),
        ('ID000061', 61, 1, 1, 959.0, 805.5, 0, 2),
    ]
    rt = ResultsTable()
    for label, spot_id, track_id, quality, x, y, z, t in rows:
        rt.incrementCounter()
        rt.addLabel('LABEL', label)
        rt.addValue('ID', spot_id)
        rt.addValue('TRACK_ID', track_id)
        rt.addValue('QUALITY', quality)
        rt.addValue('POSITION_X', x)
        rt.addValue('POSITION_Y', y)
        rt.addValue('POSITION_Z', z)
        rt.addValue('POSITION_T', t)
    self.table = rt
# Measure the current ROI on the measurement channel and record its mean.
imp1.setC(measure_channel)
roi = imp1.getRoi()
stats = imp1.getStatistics(Measurements.MEAN | Measurements.AREA |
                           Measurements.FERET | Measurements.CENTROID)
MeanChannel1.append(stats.mean)

# Generate results table and display results
ort = ResultsTable()
ort.setPrecision(3)
# NOTE(review): count is computed but the loop below re-evaluates
# len(MeanChannel1) — the two are identical here.
count = len(MeanChannel1)
for i in range(len(MeanChannel1)):
    ort.incrementCounter()
    ort.addValue("ROI", i)
    ort.addValue(channelname, MeanChannel1[i])
ort.show("Measured intensities")

# Save Results in CSV file
if autosave_results:
    dataname = imp1.getShortTitle()
    filename = dataname + "_001.csv"
    savename = savepath + "/" + filename
    ort.saveAs(savename)
# Clear the ROI manager for the next round.
rm.runCommand("Deselect")
rm.runCommand("Delete")
# Spot features exported as columns for each spot row.
FEATURES = ["POSITION_X", "POSITION_Y", "FRAME"]
# Only visible tracks (argument True).
trackIDs = model.getTrackModel().trackIDs(True)
results = ResultsTable()

# Parse spots to insert values as objects
for trackID in trackIDs:
    track = model.getTrackModel().trackSpots(trackID)
    # Sort by frame
    sortedTrack = list(track)
    sortedTrack.sort(key=lambda s: s.getFeature("FRAME"))
    for spot in sortedTrack:
        results.incrementCounter()
        results.addValue(ID_COLUMN, "" + str(spot.ID()))
        # results.addValue(CELL_LABEL_COLUMN,str(int(spot.getFeature("MAX_INTENSITY"))))
        results.addValue(CELL_LABEL_COLUMN, spot.getName())
        results.addValue(TRACK_ID_COLUMN, "" + str(trackID))
        for feature in FEATURES:
            val = spot.getFeature(feature)
            # Missing features come back as NaN; report them as "None".
            if math.isnan(val):
                results.addValue(feature.lower(), "None")
            else:
                results.addValue(feature.lower(), "" + str(int(val)))
        parents = []
        children = []
        # NOTE(review): snippet is truncated here — parents/children are
        # presumably filled from the edge endpoints further down in the
        # original script.
        for edge in model.getTrackModel().edgesOf(spot):
            source, target = model.getTrackModel().getEdgeSource(
                edge), model.getTrackModel().getEdgeTarget(edge)
# For every detected peak, compare the mean intensity of a square
# interval around it with that of a circle of equal area.
for peak in peaks:
    # Read peak coordinates into an array of integers
    peak.localize(p)
    # Define limits of the interval around the peak:
    # (sigmaSmaller is half the radius of the embryo)
    minC = [int(p[i] - sigmaSmaller) for i in xrange(img.numDimensions())]
    maxC = [int(p[i] + sigmaSmaller) for i in xrange(img.numDimensions())]
    # View the interval around the peak, as a flat iterable (like an array)
    square = Views.flatIterable(Views.zeroMin(Views.interval(img, minC, maxC)))
    # Compute sum of pixel intensity values of the interval
    # (The t is the Type that mediates access to the pixels, via its get* methods)
    s1 = sum(t.getInteger() for t in square)
    area1 = Intervals.numElements(square)
    # Use a sphere instead
    radius = sqrt(area1 / pi)  # same area for both
    print sigmaSmaller, radius
    circle = Masks.toIterableRegion(GeomMasks.closedSphere(p, radius))
    s2 = sum(t.getInteger() for t in Regions.sample(circle, imgE))
    area2 = Intervals.numElements(circle)
    print area1, area2
    # Add to results table
    table.incrementCounter()
    table.addValue("x", p[0])
    table.addValue("y", p[1])
    table.addValue("avg square", float(s1) / area1)
    table.addValue("avg circle", float(s2) / area2)
table.show("Embryo intensities at peaks")
# NOTE(review): this fragment begins mid-call — the opening
# IJ.run(..., "Analyze Particles...", line is outside the visible chunk.
    "size=5-Infinity pixel add exclude stack")
IJ.run("Clear Results", "")
ort = ResultsTable()
ort.setPrecision(1)
imp_measure = ExtractChannel(imp1, Measure_Channel)
imp_measure.show()
# Measure every ROI from the ROI manager on the measurement channel.
for i, roi in enumerate(RoiManager.getInstance().getRoisAsArray()):
    roi2 = rm.getRoiIndex(roi)
    rm.select(imp_measure, roi2)
    stats = imp_measure.getStatistics(Measurements.MEAN | Measurements.AREA |
                                      Measurements.FERET | Measurements.CENTROID)
    ort.incrementCounter()
    #ort.addValue("ROI", str(i))
    ort.addValue("Mean intensity", str(stats.mean))
    ort.addValue("Area", str(stats.area))
    ort.addValue("Compartment", (channel2_name))
#ort.show("Results")
imp_measure.close()
# Save the measurements next to a fixed drive root and clean up.
filename = dataname + "_001.csv"
savename = "D:/" + filename
ort.saveAs(savename)
imp1.close()
# # # IJ.run(imp_measure, "Make Band...", "band=0.63") # values are in um, assumes pixelsize of 0.21
# # # imp_measure.setRoi(roi)
# Interactive loop: the user draws a freehand ROI, and the number of
# above-threshold pixels inside it is counted across the whole stack.
while goRun:
    wfud = WaitForUserDialog("Pick freehand ROI, then hit OK to analyze")
    wfud.show()
    roi = theImage.getRoi()
    if roi is None:
        # No ROI drawn -> stop the loop.
        goRun = False
    else:
        dataImage.setRoi(roi)
        subImage = dataImage.duplicate()
        dataIp = dataImage.getProcessor()
        dataIp.setRoi(roi)
        # Use the ROI mask to blank everything outside the selection.
        maskIp = dataIp.getMask()
        maskImage = ImagePlus("Mask Image", maskIp)
        ic = ImageCalculator()
        countingImage = ic.run("AND create stack", subImage, maskImage)
        # Brute-force count of pixels at or above the threshold.
        pixelCount = 0
        for i in range(1, countingImage.getNSlices() + 1):
            countingImage.setSlice(i)
            countingIp = countingImage.getProcessor()
            for x in range(0, countingImage.getWidth()):
                for y in range(0, countingImage.getHeight()):
                    if (countingIp.getPixel(x, y) >= intensityThreshold):
                        pixelCount = pixelCount + 1
        totAvailablePixels = countingImage.getWidth() * countingImage.getHeight() * countingImage.getNSlices()
        #IJ.log("Pixel count: " + str(pixelCount) + " of " + str(totAvailablePixels))
        countingImage.close()
        # One result row per analyzed ROI.
        rt.incrementCounter()
        rt.addValue("PosPixels", pixelCount)
        rt.addValue("TotPixels", totAvailablePixels)
        rt.show("DMI Results")
x = int(rt.getValue("X", count)) y = int(rt.getValue("Y", count)) # if that cell contains these coordinates, add the cell # number as "cell" value for that foci if roi.contains(x, y): rt.setValue("cell", count, cell) cell = cell + 1 # save this results table rt.save(directory + "/" + filename + "_GFP.csv") print("saving at ", directory + "/" + filename + "_GFP.csv") # create summary resulta table, with "cell" and "foci_count" columns consol = ResultsTable() consol.incrementCounter() consol.addValue("cell", 0) consol.addValue("foci_count", 0) rowcount = 1 # loop over all cells, add cell number to the "cell" column for count in range(cell): consol.setValue("cell", count, count) # loop over all foci for count in range(rt.size()): # get in which cell that foci is and increase the # counter on the summary results table currcell = int(rt.getValue("cell", count)) consol.setValue( "foci_count", currcell,
def reportClustersAsTable(clusters, allPoints, XColumn='X', YColumn='Y', ZColumn='Z', NRColumn='NR'):
    '''
    Report the clustered and unclustered points in the tables 'clusters'
    and 'unclustered'.

    clusters  -- iterable of cluster objects exposing getPoints()
    allPoints -- all detected points; those not belonging to any cluster
                 are reported in the 'unclustered' table
    XColumn, YColumn, ZColumn, NRColumn -- column names to write
    '''
    rt = ResultsTable()
    counter = 1
    clusterCounter = 1
    # Membership is tested once per point below, so keep the clustered
    # coordinates in a set of tuples (O(1) lookup) instead of the original
    # list of lists (O(n) per test -> O(n^2) overall).
    clusteredCoords = set()
    for c in clusters:
        for dp in c.getPoints():
            rt.incrementCounter()
            p = dp.getPoint()
            rt.addValue(NRColumn, counter)
            rt.addValue(XColumn, p[0])
            rt.addValue(YColumn, p[1])
            rt.addValue(ZColumn, p[2])
            rt.addValue("C", clusterCounter)
            counter += 1
            clusteredCoords.add((p[0], p[1], p[2]))
        clusterCounter += 1
    rt.show("clusters")
    # make sure the main "Results" table carries an NR (point number) column
    win = WindowManager.getWindow("Results")
    rt = win.getResultsTable()
    X, Y, Z = getColumns(XColumn, YColumn, ZColumn)
    if not rt.columnExists(NRColumn):
        for i in range(0, len(X)):
            rt.setValue(NRColumn, i, i + 1)
        rt.updateResults()
    # NOTE(review): return value unused; call kept in case getColumn has side effects
    NR = getColumn(NRColumn)
    # every point that never appeared in a cluster is 'unclustered'
    unclusteredPoints = [
        [q[0], q[1], q[2]]
        for q in (point.getPoint() for point in allPoints)
        if (q[0], q[1], q[2]) not in clusteredCoords
    ]
    counter = 1
    rt = ResultsTable()
    for p in unclusteredPoints:
        rt.incrementCounter()
        rt.addValue(NRColumn, counter)
        rt.addValue(XColumn, p[0])
        rt.addValue(YColumn, p[1])
        rt.addValue(ZColumn, p[2])
        counter += 1
    rt.show("unclustered")
    WindowManager.setWindow(win)
def __fmeasures(self) :
    """Measure every stored ROI, slice by slice, into a ResultsTable.

    For each slice the stored ROI is applied and the standard ImageJ
    Analyzer measurements are appended to the table; when the max-finder
    option is active, the detected maxima of the corresponding slice of
    self.__impMax are additionally written to two TextWindows and counted
    per slice ('npoints' column).

    Side effects: updates self.__Cutoff from the GUI field, shows/refreshes
    the "RT-<name>" table and (optionally) the two text windows.
    """
    self.__Cutoff = float(self.__display4.text)
    nslices = self.__impRes.getImageStackSize()
    rt = ResultsTable()
    rt.show("RT-"+self.__name)
    if self.__maxfinder :
        # one line per detected maximum / one line per slice label
        twpoints = TextWindow("points-"+self.__name, "index\tlabel\tname\tx\ty\taxis\tcellw\tcellh", "", 200, 450)
        twlabels = TextWindow("labels-"+self.__name, "index\tlabel\tname\tnpoints", "", 200, 450)
    for index in range(1, nslices+1):
        # progress feedback (integer percent)
        pc = (index*100)/nslices
        IJ.showStatus("Je suis a "+str(pc)+"%")
        self.__impRes.setSlice(index)
        self.__impRes.killRoi()
        roi = self.__listrois[index-1]
        self.__impRes.setRoi(roi)
        # standard measurements appended to rt for this slice/ROI
        analyser = Analyzer(self.__impRes, Analyzer.LABELS+Analyzer.CENTER_OF_MASS+Analyzer.CENTROID+Analyzer.INTEGRATED_DENSITY+Analyzer.MEAN+Analyzer.KURTOSIS+Analyzer.SKEWNESS+Analyzer.MIN_MAX+Analyzer.SLICE+Analyzer.STACK_POSITION+Analyzer.STD_DEV, rt)
        analyser.measure()
        rt.show("RT-"+self.__name)
        rect = roi.getBounds()
        # NOTE: a large commented-out image-moment computation (central and
        # mean-centred moments up to order 4) and its accumulator variables
        # were removed here as dead code; restore from version control if
        # that analysis is ever reinstated.
        rt.addValue("roiw", rect.width)
        rt.addValue("roih", rect.height)
        rt.addValue("cellw", self.__ipw[index-1])
        rt.addValue("cellh", self.__iph[index-1])
        self.__impRes.killRoi()
        npointsmax = 0
        nameroi = self.__dictCells[index][0]
        lab = self.__dictCells[index][1]
        if self.__maxfinder :
            # count and log every non-zero pixel of the maxima image
            self.__impMax.setSlice(index)
            ipmax = self.__impMax.getProcessor()
            for y in range(ipmax.getHeight()) :
                for x in range(ipmax.getWidth()) :
                    if ipmax.getPixelValue(x,y) > 0 :
                        twpoints.append(str(index)+"\t"+lab+"\t"+nameroi+"\t"+str(x)+"\t"+str(y)+"\t"+str(self.__cellsrois[index-1][0].getLength())+"\t"+str(self.__ipw[index-1])+"\t"+str(self.__iph[index-1]))
                        npointsmax+=1
            rt.addValue("npoints", npointsmax)
            twlabels.append(str(index)+"\t"+lab+"\t"+nameroi+"\t"+str(npointsmax))
    # final refresh once all slices are measured (redundant duplicate
    # rt.show call removed)
    rt.show("RT-"+self.__name)
def main(imp,options):
    """Measure the distance from a locus to a compartment boundary and report
    it in the "Loci distance to compartment" table.

    imp     -- the (multi-channel, z-stacked) ImagePlus to analyse
    options -- dict with keys: comp_ch, loci_ch, z_range, comp_T, loci_T,
               loci_method ("locus center" | "locus boundary"), measure_feret

    Side effects: appends one row to the shared results table and draws the
    measured distance line plus locus/compartment outlines as an overlay.
    """
    from ij.plugin import ChannelSplitter
    from ij.gui import Roi,PointRoi, PolygonRoi, Overlay, Line
    from java.awt import Color
    from ij import WindowManager
    from ij.measure import ResultsTable
    from ij.text import TextWindow
    active_z=imp.getZ()
    imps = ChannelSplitter.split(imp)
    imp.setZ(active_z)
    roi_int = imp.getRoi()
    # SUM-projected, background-subtracted views of both channels
    comp_imp=Zproj(imps[options["comp_ch"]], "SUM", active_z, options["z_range"])
    comp_imp=mode_subtract(comp_imp,roi_int)
    loci_imp=Zproj(imps[options["loci_ch"]], "SUM", imp.getZ(), options["z_range"])
    loci_imp=mode_subtract(loci_imp,roi_int)
    #Finding the boundaries of compartment and loci
    comp_roi=thresh(sum_prj=comp_imp,thresh=options["comp_T"],roi=roi_int,method="boundary")
    if (options["loci_method"]== "locus center"):
        loci_roi=thresh(sum_prj=loci_imp, thresh=options["loci_T"], roi=roi_int, method="point")
    elif options["loci_method"]== "locus boundary":
        loci_roi=thresh(sum_prj=loci_imp, thresh=options["loci_T"], roi=roi_int, method="boundary")
    # distance is centre-to-edge or edge-to-edge depending on the method
    if options["loci_method"]== "locus center":
        dist,xc,yc,xl,yl=get_center_edge_dist(imp,comp_roi, loci_roi)
    elif options["loci_method"]== "locus boundary":
        dist,xc,yc,xl,yl=get_closest_points(imp,comp_roi,loci_roi)
    # reuse the results window if it is already open ('is None' instead of
    # '== None'; stray debug print removed)
    rt_exist = WindowManager.getWindow("Loci distance to compartment")
    if rt_exist is None or not isinstance(rt_exist, TextWindow):
        table= ResultsTable()
    else:
        table = rt_exist.getTextPanel().getOrCreateResultsTable()
    table.incrementCounter()
    table.addValue("Label", imp.title)
    table.addValue("Distance(micron)", dist)
    if options['measure_feret']:
        feret_roi,loci_feret,loci_area= feret(sum_prj=loci_imp,thresh=options["loci_T"], roi=roi_int,pixel_size=imp.getCalibration().pixelWidth)
        table.addValue("Loci feret", loci_feret)
        table.addValue("Loci area", loci_area)
    table.show("Loci distance to compartment")
    ## Adding loci overlay
    ov=imp.getOverlay()
    if ov is None:
        ov=Overlay()
    line = Line(xc,yc, xl,yl)
    line.setStrokeWidth(0.2)
    line.setStrokeColor(Color.PINK)
    ov.add(line)
    if options["loci_method"]== "locus center":
        ov.add(PointRoi(loci_roi["x"],loci_roi["y"]))
    elif options["loci_method"]== "locus boundary":
        ov.add(loci_roi)
    if options['measure_feret']:
        ov.add(feret_roi)
    ov.add(comp_roi)
    imp.setOverlay(ov)
# Fragment: write one row per parameter-sweep run (or a single one-shot run)
# into an ImageJ ResultsTable and save it after each row.
resultsTable.showRowNumbers(False)
for i in range(0, len(results)):
    if options['oneShot']:
        # single run: parameters come straight from the options dict
        localBackground = options['localBackground']
        seedRadius = options['seedRadius']
        gaussXY = options['gaussXY']
        gaussZ = options['gaussZ']
    else:
        # sweep: one parameter set per result entry
        localBackground = parameters[i]['localBackground']
        seedRadius = parameters[i]['seedRadius']
        gaussXY = parameters[i]['gaussXY']
        gaussZ = parameters[i]['gaussZ']
    resultsTable.incrementCounter()
    resultsTable.addValue("Threshold", localBackground)
    resultsTable.addValue("Seed radius", seedRadius)
    resultsTable.addValue("GXY", gaussXY)
    resultsTable.addValue("GZ", gaussZ)
    resultsTable.addValue("TOTAL", results[i]['all'])
    # spot counts bucketed by size range; dict keys are the bucket lower bounds
    resultsTable.addValue("0-250", results[i]['0'])
    resultsTable.addValue("251-500", results[i]['250'])
    resultsTable.addValue("501-750", results[i]['500'])
    resultsTable.addValue("751-1000", results[i]['750'])
    resultsTable.addValue("1001-1500", results[i]['1000'])
    resultsTable.addValue(">1501", results[i]['1500'])
    resultsTable.addValue("Skipped", results[i]['edge'])
    # saved every iteration so a crash mid-sweep keeps partial results
    resultsTable.save(options['outputDir'] + options['outputFile'])
# NOTE(review): fragment truncated -- the dangling "else:" below belongs to an
# enclosing statement outside this view
else:
def open_Octopus_file():
    """Import an Octopus raw stream (.dth headers + .dat data) as an ImageJ stack.

    Prompts for a .dth file, parses the stream headers for geometry and bit
    depth, lets the user choose a frame range, then assembles all matching
    .dat files into a single stack. Optionally shows the header metadata in
    a ResultsTable.

    Returns False on cancel/bad input, True on success (None when the
    options dialog is cancelled, as before).
    """
    # set up a file info structure for raw little-endian 16-bit images
    fi = FileInfo()
    fi.fileFormat = fi.RAW
    fi.fileType = FileInfo.GRAY16_UNSIGNED
    fi.intelByteOrder = True
    fi.nImages = 1

    op = OpenDialog("Choose Octopus .dth file...", "")
    if not op.getDirectory(): return False

    # get the file extension
    file_extension = re.search(r'(\.[a-z][a-z][a-z])', op.getFileName()).group(1)
    if file_extension != ".dth":
        dlg = GenericDialog("Warning")
        dlg.addMessage("Please select an octopus .dth file")
        dlg.showDialog()
        return False

    # now strip the filename into a stem and index
    # (fixed character class: '[a-zA-z]' also matched '[', '\', ']', '^', '_', '`')
    file_parse = re.match(r'([a-zA-Z0-9_]*_)([0-9]+)\.dth', op.getFileName())
    file_stem = file_parse.group(1)
    file_index = int( file_parse.group(2) )

    # ok now we need to parse the header info
    header = get_Octopus_header(op.getDirectory(), file_stem, file_index)
    fi.nImages = len(header['N'])

    # check to see whether we have a bit depth, if not, assume 16-bit
    if 'Bit_Depth' in header:
        bit_depth = int(header['Bit_Depth'][0])
        if bit_depth == 8: fi.fileType = FileInfo.GRAY8
    else:
        bit_depth = 16

    # will assume that all files have the same size
    fi.width = int( header['W'][0] )
    fi.height = int( header['H'][0] )
    file_timestamp = strftime("%a, %d %b %Y %H:%M:%S", gmtime(float(header['Time'][0])) )

    # make a new imagestack to store the data
    stack = ImageStack(fi.width, fi.height)

    # finally, we need to make a list of files to import as sometimes we have
    # non contiguous file numbers
    try:
        files = os.listdir(op.getDirectory())
    except OSError:
        # os.listdir raises OSError (IOError, as before, would not be caught)
        raise IOError("No files exist in directory: " + op.getDirectory())

    filenums = []
    for f in files:
        # strip off the stem, and get the number
        targetfile = re.match(file_stem+r'([0-9]+)\.dth', f)
        # only take those files which match the formatting requirements
        if targetfile:
            filenums.append( int(targetfile.group(1)) )

    # sort the file numbers
    sorted_filenums = sorted(filenums)

    # make a file stats string
    file_stats_str = file_stem + '\n' + str(fi.width) +'x' + str(fi.height) + 'x' + \
        str(len(sorted_filenums)) +' ('+str(bit_depth)+'-bit)\n' + file_timestamp

    # now open a dialog to let the user set options
    dlg = GenericDialog("Load Octopus Stream (v"+__version__+")")
    dlg.addMessage(file_stats_str)
    dlg.addStringField("Title: ", file_stem)
    dlg.addNumericField("Start: ", 1, 0)
    dlg.addNumericField("End: ", len(sorted_filenums), 0)
    dlg.addCheckbox("Open headers", True)
    dlg.addCheckbox("Contiguous stream?", False)
    dlg.addCheckbox("8-bit unsigned", bit_depth==8)
    dlg.showDialog()

    # if we cancel the dialog, exit here
    if dlg.wasCanceled():
        return

    # set some params
    file_title = dlg.getNextString()
    file_start = dlg.getNextNumber()
    file_end = dlg.getNextNumber()
    DISPLAY_HEADER = bool( dlg.getNextBoolean() )
    # NOTE(review): the last two checkboxes are never read back

    # check the ranges
    if file_start > file_end:
        file_start, file_end = file_end, file_start
    if file_start < 1:
        file_start = 1
    if file_end > len(sorted_filenums):
        file_end = len(sorted_filenums)

    # now set these to the actual file numbers in the stream
    file_start = sorted_filenums[int(file_start)-1]
    file_end = sorted_filenums[int(file_end)-1]
    files_to_open = [n for n in sorted_filenums if n>=file_start and n<=file_end]

    # if we've got too many, warn the user first
    if (len(files_to_open) * fi.nImages * fi.width * fi.height) > (MAX_FRAMES_TO_IMPORT*512*512):
        dlg = GenericDialog("Warning")
        dlg.addMessage("This may use a lot of memory. Continue?")
        dlg.showDialog()
        if dlg.wasCanceled(): return False

    IJ.log( "Opening file: " + op.getDirectory() + op.getFileName() )
    IJ.log( file_stats_str + "\nFile range: " + str(files_to_open[0]) + \
        "-" + str(files_to_open[-1]) +"\n" )

    # make a results table for the metadata
    # NOTE: horrible looping at the moment, but works
    if DISPLAY_HEADER:
        rt = ResultsTable()

    # ok now we can put the files together into the stack
    for i in files_to_open:
        # open the original .dat file and get the stack
        fi.fileName = get_Octopus_filename( op.getDirectory(), file_stem, i)
        if os.path.isfile( fi.fileName ):
            fo = FileOpener(fi)
            imp = fo.open(False).getStack()
            # put the slices into the stack (unused BufferedImage
            # conversion removed)
            for im_slice in xrange( imp.getSize() ):
                ip = imp.getProcessor( im_slice+1 )
                stack.addSlice( file_title, ip )
            if DISPLAY_HEADER:
                header = get_Octopus_header(op.getDirectory(), file_stem, i)
                for n in xrange(len(header['N'])):
                    rt.incrementCounter()
                    for k in header.keys():
                        rt.addValue(k, parse_header( header[k][n] ) )
        else:
            # stop at the first missing file in the stream
            break

    # done!
    output = ImagePlus('Octopus ('+file_stem+')', stack)
    output.show()
    if DISPLAY_HEADER:
        rt.show("Octopus header metadata")
    return True
# Fragment: run ImageJ's "Colocalization Test" on the red/green pair within the
# binary mask, read R(obs) back from the Results text window, and append it to
# a persistent "Summary" table.
Prefs.blackBackground = False
IJ.run(mask, "Make Binary", "")
IJ.run(mask, "Invert", "")
#Colocalization Test
IJ.run( red, "Colocalization Test", "channel_1=[" + red.getTitle() + "] channel_2=[" + green.getTitle() + "] roi=mask randomization=[Fay (x,y,z translation)]")
red.close()
green.close()
mask.changes = False  # suppress the "save changes?" prompt on close
mask.close()
#get ResultsTable object and save it
# the plugin appends its result as the last line of the "Results" window;
# column 1 of that tab-separated line holds R(obs)
textpanel = WindowManager.getFrame("Results").getTextPanel()
resultline = textpanel.getLine(textpanel.getLineCount() - 1)
value = float(resultline.split('\t')[1])
print value
allResultsTitle = "Summary"
# reuse the Summary window if it is already open, else start a fresh table
frame = WindowManager.getFrame(allResultsTitle)
summaryrt = None
if frame is not None:
    summaryrt = frame.getTextPanel().getResultsTable()
else:
    summaryrt = ResultsTable()
summaryrt.incrementCounter()
summaryrt.addValue("Image", imp.getTitle())
summaryrt.addValue("R(obs)", value)
summaryrt.show(allResultsTitle)
#imagetitle = image.getTitle().split(".")[0]
#rtfile = path.join(outputdir, str(size)+"Summary_" + imagetitle + ".csv")
# Fragment (truncated at both ends): collect channel-2 maxima as absolute
# coordinates, then emit a placeholder "Invalid" row when either channel has
# no spots for the current cell.
allPearson.append(pearson)
IJ.run(imp, "Find Maxima...", noisec2_1)
proi2 = IJ.getImage().getRoi()
points_C2 = []
if proi2.getClass() == PointRoi:
    # PointRoi coordinates are relative to its bounding box -> add the offset
    px2 = proi2.getXCoordinates()
    py2 = proi2.getYCoordinates()
    bounds2 = proi2.getBounds()
    #points_C2 = []
    for i in range(proi2.getNCoordinates()):
        points_C2.append((bounds2.x + px2[i], bounds2.y + py2[i]))
        #points.append((px[i], py[i]))
    #print points_C2
if points_C1 == [] or points_C2 == []:
    # one channel has no spots: keep the row but mark distances as 'Invalid'
    ort.incrementCounter()
    ort.addValue("Cell", cell)
    if points_C1 == []:
        print 'No spots found in Channel1 in Cell' + str(cell)
    if points_C2 == []:
        print 'No spots found in Channel2 in Cell' + str(cell)
    ort.addValue("Point_C1", 'Invalid')
    ort.addValue("Distance in um", 'Invalid')
    ort.addValue("Distance in pixel", 'Invalid')
    ort.addValue("Area in um", area)
    ort.addValue("Feret in um", feret)
    ort.addValue("Fittet Elipse - Length", str(round(ellipse_length, 3)))
    ort.addValue("Fittet Elipse - Width", str(round(ellipse_width, 3)))
    ort.addValue("Center", str(center))
    ort.addValue("Center - Point_C1 in um", 'Invalid')
    ort.addValue("Center - Point_C2 in um", 'Invalid')
    ort.addValue("Center - Point_C1", 'Invalid')
# Fragment: optional Difference-of-Gaussian prefilter on channel 2, count its
# intensity maxima, append per-frame counts to the "Counted spots" table and
# optionally save it as CSV.
if do_dog_C2:
    imp_DoG = DoG(imp_threshold, 1, 3)
else:
    # no filtering requested -- use the thresholded image directly
    imp_DoG = imp_threshold
#imp_DoG.show()
#IJ.run(imp_DoG, "Z Project...", "projection=[Max Intensity] all")
imp_DoG.setTitle("Difference_of_Gaussian")
MeanChannel1 = []
MeanChannel2 = []
maximaC2 = mf.getMaxima(imp_DoG.getProcessor(), noise_C2, True)
print maximaC2.npoints
imp_threshold.close()
imp_DoG.close()
ort.incrementCounter()
ort.addValue("Frame", i)
ort.addValue("Cells", Channel1_count)
ort.addValue("Spots C1", maximaC1.npoints)
ort.addValue("Spots C2", maximaC2.npoints)
IJ.run(imp1, "Select None", "")
ort.show("Counted spots")
if automatic_save_results:
    dataname = imp1.getTitle()
    filename = dataname + ".csv"
    #files = glob.glob(savepath+"/"+dataname+"*.csv")
    savename = savepath + "/" + filename
    ort.saveAs(savename)
# suppress the "save changes?" prompt when imp1 is closed later
imp1.changes = False
# Fragment (runs inside a per-ROI loop): for ROI i of channel 1, measure the
# full section, the foreground (ROI AND tissue outline) and the background
# (ROI XOR foreground), then append areas/means/integrated intensities to the
# analysis table.
roi = rm.getRoi(i)
raw_ch1.setRoi(roi)
stats = raw_ch1.getStatistics()
# intersect ROI i with the outline ROI (last of the original 'size' ROIs)
rm.setSelectedIndexes([i, size - 1])
rm.runCommand(raw_ch1, "AND")
rm.addRoi(raw_ch1.getRoi())
# the ROI just added is the LAST entry, i.e. index getCount()-1
# (was getCount()+1, which is past the end of the list)
roi = rm.getRoi(rm.getCount() - 1)
stats2 = raw_ch1.getStatistics()
# XOR ROI i against the freshly added foreground ROI -> background
# (index fixed from getCount(), also past the end, to getCount()-1)
rm.setSelectedIndexes([i, rm.getCount() - 1])
rm.runCommand(raw_ch1, "XOR")
rm.addRoi(raw_ch1.getRoi())
roi = rm.getRoi(rm.getCount() - 1)
stats3 = raw_ch1.getStatistics()
# column names (including the "Intesity" typo) are kept as-is because
# downstream consumers may reference them
table.addValue("Area full section", stats.area)
table.addValue("Sum of Intesity", stats.area * stats.mean)
table.addValue("Mean Int Pixels", stats.mean)
table.addValue("Area Foreground", stats2.area)
table.addValue("Sum of Intesity Foreground", stats2.area * stats2.mean)
table.addValue("Mean Int Pixels Foreground", stats2.mean)
table.addValue("Area Background", stats3.area)
# was stats3.area * stats.mean (full-section mean) -- use the background mean
table.addValue("Sum of Intesity Background", stats3.area * stats3.mean)
table.addValue("Mean Int Pixels Background", stats3.mean)
table.show("Results Analysis")
def runScript():
    """Compute per-trajectory path length and mean velocity from a particle
    tracker results table.

    Reads rows (Trajectory, x, y, z, Frame) from the table named
    inputTableName; for every trajectory it accumulates per-frame distances
    (spread evenly over skipped frames), then prints CSV lines and/or fills
    an output ResultsTable depending on the printOutputData /
    showOutputTable flags.
    """
    # find table with trajectories
    rt = findResultsTable(inputTableName)
    if rt is None:
        print("Results table window titled [" + inputTableName + "] not found!")
        return
    numOfRows = rt.getCounter()
    if numOfRows > 1:
        # create output table only when requested
        if showOutputTable:
            velocityRT = ResultsTable()
        # if output in csv format requested print header
        if printOutputData:
            print("trajectory;pathLen;noOfFrames;velocity;velocity stddev")
        # read first data point
        trajectoryId = rt.getValue("Trajectory", 0)
        posX = rt.getValue("x", 0)
        posY = rt.getValue("y", 0)
        posZ = rt.getValue("z", 0)
        startFrame = rt.getValue("Frame", 0)
        frame = startFrame
        trajDistances = []  # distances per frame measured in pixels
        for idx in range(1, numOfRows):
            currTrajectoryId = rt.getValue("Trajectory", idx)
            currPosX = rt.getValue("x", idx)
            currPosY = rt.getValue("y", idx)
            currPosZ = rt.getValue("z", idx)
            currFrame = rt.getValue("Frame", idx)
            # still reading data from the 'current' trajectory
            if trajectoryId == currTrajectoryId:
                distance = ((currPosX - posX)**2 + (currPosY - posY)**2 + (currPosZ - posZ)**2)**(0.5)
                # a data point can "jump" over frames (depending on the link
                # range setting), so spread the distance evenly over the
                # skipped frames -- necessary for a correct stddev
                # NOTE(review): assumes consecutive rows of one trajectory
                # never share a frame number (division by zero otherwise)
                noOfFrames = currFrame - frame
                distancePerFrame = distance / noOfFrames
                for i in range(int(noOfFrames)):
                    trajDistances.append(distancePerFrame)
            # next trajectory begins here, or this is the very last point:
            # emit the accumulated trajectory info
            if trajectoryId != currTrajectoryId or idx == numOfRows - 1:
                # trajectory length in physical units
                trajDistancesTrueLen = [pixelResolution * d for d in trajDistances]
                pathLen = sum(trajDistancesTrueLen)
                # length of trajectory in frames
                if idx == numOfRows - 1:
                    stopFrame = currFrame
                else:
                    stopFrame = frame
                numOfFrames = stopFrame - startFrame
                # velocity and its standard deviation
                velocity = pathLen / (numOfFrames * frameInterval)
                standardDev = stddev([d / frameInterval for d in trajDistancesTrueLen])
                if printOutputData:
                    print(str(int(trajectoryId)) + ";" + str(pathLen) + ";" + str(numOfFrames) + ";" + str(velocity) + ";" + str(standardDev))
                if showOutputTable:
                    velocityRT.incrementCounter()
                    velocityRT.addValue("trajectory", int(trajectoryId))
                    velocityRT.addValue("pathLen", pathLen)
                    velocityRT.addValue("noOfFrames", numOfFrames)
                    velocityRT.addValue("velocity", velocity)
                    velocityRT.addValue("velocity stddev", standardDev)
                # beginning of new trajectory
                trajDistances = []
                startFrame = rt.getValue("Frame", idx)
            # advance the sliding 'previous point' state
            trajectoryId = currTrajectoryId
            posX = currPosX
            posY = currPosY
            posZ = currPosZ
            frame = currFrame
        # guard the final show: velocityRT only exists when showOutputTable
        # was set (unconditional show raised NameError otherwise)
        if showOutputTable:
            velocityRT.show(outputTableName)
imp_DoG = imp_threshold #imp_DoG.show() #IJ.run(imp_DoG, "Z Project...", "projection=[Max Intensity] all") imp_DoG.setTitle("Difference_of_Gaussian") MeanChannel1 = [] MeanChannel2= [] maximaC2 = mf.getMaxima(imp_DoG.getProcessor(), noise_C2, True) #print maximaC2.npoints imp_threshold.close() imp_DoG.close() ort.incrementCounter() ort.addValue("Frame", i) ort.addValue("Spots C1", maximaC1.npoints) ort.addValue("Spots C2", maximaC2.npoints) ort.addValue("Area occupied with cells in %", str((100-100/stats_all.area*stats_background.area))) ort.addValue("Area occupied with cells", str((stats_all.area-stats_background.area))) ort.addValue("Noise C1", str(noise_C1)) ort.addValue("Noise C2", str(noise_C2)) IJ.run(imp1, "Select None", "") ort.show("Counted spots") if automatic_save_results: dataname = imp1.getShortTitle() filename = dataname+".csv" #files = glob.glob(savepath+"/"+dataname+"*.csv")
#print MeanChannel2 #print MeanChannel3 #print NormChannel1 #print NormChannel2 #print NormChannel3 #print XYCoordinates #print Distance #print Velocity ort = ResultsTable() ort.setPrecision(3) #print ort.getCounter count = len(MeanChannel1) for i in range(count): ort.incrementCounter() ort.addValue("Frame", i) ort.addValue("Channel 1", MeanChannel1[i]) ort.addValue("X coordinate", str(XYCoordinates[i][0])) ort.addValue("Y coordinate", str(XYCoordinates[i][1])) ort.addValue("Distance in um", str((Distance[i]*Pixelsize))) ort.addValue("Velocity in um/s", str((Velocity[i]*Pixelsize))) ort.addValue("Timepoint", str(movietime[i])) ort.show("Measured intensities") dataname = imp.getShortTitle() filename_tif = dataname+"_gallery_001.tif" files_tif = glob.glob(savepath+"/Galleries/"+dataname+"*.tif") files_tif.sort()
# # # # Generate ROIs by "Analyse Particles" IJ.run(Channel4, "Analyze Particles...", "size=5-Infinity pixel add exclude stack") IJ.run("Clear Results", "") Channel4_count = RoiManager.getInstance().getCount() print Channel4_count time.sleep(0.5) rm.runCommand("reset") time.sleep(0.5) ort = ResultsTable() ort.setPrecision(2) ort.incrementCounter() ort.addValue("Channel1", Channel1_count) ort.addValue("Channel2", Channel2_count) ort.addValue("Channel3", Channel3_count) ort.addValue("Channel4", Channel4_count) ort.show("Results") if automatic_save_results: dataname = imp1.getTitle() filename = dataname + ".csv" #files = glob.glob(savepath+"/"+dataname+"*.csv") savename = savepath + "/" + filename ort.saveAs(savename) Channel1.changes = False Channel1.close() Channel2.changes = False
# Fragment: compute distances between landmark point pairs (x_col/y_col/z_col
# hold paired coordinates) and append them to a persistent "Measurements"
# table. z differences are scaled by slice thickness (z_thick) before the
# 3D correction.
br_uncorr = math.sqrt((x_col[1] - x_col[0])**2 + (y_col[1] - y_col[0])**2)  # in-plane distance, points 0-1
br_thick = (z_col[1] - z_col[0]) * z_thick
br_corr = math.sqrt(br_uncorr**2 + br_thick**2)  # 3D-corrected distance
po_uncorr = math.sqrt((x_col[2] - x_col[1])**2 + (y_col[2] - y_col[1])**2)  # points 1-2
po_thick = (z_col[2] - z_col[1]) * z_thick
po_corr = math.sqrt(po_uncorr**2 + po_thick**2)
# the remaining measures are purely in-plane (point pairs 3-4, 5-6, 7-8)
mbl = math.sqrt((x_col[4] - x_col[3])**2 + (y_col[4] - y_col[3])**2)
sl = math.sqrt((x_col[6] - x_col[5])**2 + (y_col[6] - y_col[5])**2)
sw = math.sqrt((x_col[8] - x_col[7])**2 + (y_col[8] - y_col[7])**2)
IJ.run("Clear Results")
# Append to custom Measurement table or create it if non existing
MeasureTable = WindowManager.getWindow("Measurements")
if MeasureTable == None:
    MeasureTable = ResultsTable()
else:
    MeasureTable = WindowManager.getWindow("Measurements")
    MeasureTable = MeasureTable.getTextPanel().getOrCreateResultsTable()
MeasureTable.incrementCounter()
MeasureTable.addValue("BR", br_corr)
MeasureTable.addValue("PO", po_corr)
MeasureTable.addValue("MBL", mbl)
MeasureTable.addValue("SL", sl)
MeasureTable.addValue("SW", sw)
MeasureTable.show("Measurements")
# Fragment (continues a peak-marking loop started above): mark all peaks on
# the image, then sum pixel intensities in a small interval around each peak
# and report them in a ResultsTable.
for peak in peaks:
    # Read peak coordinates into an array of integers
    peak.localize(p)
    roi.addPoint(imp, p[0], p[1])
imp.setRoi(roi)
# Now, iterate each peak, defining a small interval centered at each peak,
# and measure the sum of total pixel intensity,
# and display the results in an ImageJ ResultTable.
table = ResultsTable()
for peak in peaks:
    # Read peak coordinates into an array of integers
    peak.localize(p)
    # Define limits of the interval around the peak:
    # (sigmaSmaller is half the radius of the embryo)
    minC = [p[i] - sigmaSmaller for i in range(img.numDimensions())]
    maxC = [p[i] + sigmaSmaller for i in range(img.numDimensions())]
    # View the interval around the peak, as a flat iterable (like an array)
    fov = Views.interval(img, minC, maxC)
    # Compute sum of pixel intensity values of the interval
    # (The t is the Type that mediates access to the pixels, via its get* methods)
    s = sum(t.getInteger() for t in fov)
    # Add to results table
    table.incrementCounter()
    table.addValue("x", p[0])
    table.addValue("y", p[1])
    table.addValue("sum", s)
table.show("Embryo intensities at peaks")
def run():
    """Entry point: find close peak pairs between two channels.

    Opens an image via Bio-Formats, max-projects it if it is a z-stack,
    background-subtracts the two chosen channels, detects DoG peaks in each,
    and builds four point ROIs: all neuron peaks, all glioma peaks, glioma
    peaks within min_dist of a neuron, and neuron peaks within min_dist of
    a glioma. Counts are reported in a "Results Analysis" table.
    """
    IJ.run("Close All", "")
    IJ.log("\\Clear")
    IJ.log("Find_close_peaks")
    # IJ.run returns None, so the opened image must be fetched with
    # IJ.getImage (dead 'imp =' assignment removed)
    IJ.run("Bio-Formats Importer")
    imp = IJ.getImage()
    Channel_1, Channel_2, radius_background, sigmaSmaller, sigmaLarger, minPeakValue, min_dist = getOptions()
    IJ.log("option used:" \
        + "\n" + "channel 1:" + str(Channel_1) \
        + "\n" + "channel 2:"+ str(Channel_2) \
        + "\n" + "Radius Background:"+ str(radius_background) \
        + "\n" + "Smaller Sigma:"+ str(sigmaSmaller) \
        + "\n" + "Larger Sigma:"+str(sigmaLarger) \
        + "\n" + "Min Peak Value:"+str(minPeakValue) \
        + "\n" + "Min dist between peaks:"+str(min_dist))
    IJ.log("Computing Max Intensity Projection")
    if imp.getDimensions()[3] > 1:
        imp_max = ZProjector.run(imp,"max")
        #imp_max = IJ.run("Z Project...", "projection=[Max Intensity]")
        #imp_max = IJ.getImage()
    else:
        imp_max = imp
    ip1, ip2 = extract_channel(imp_max, Channel_1, Channel_2)
    imp1, imp2 = back_substraction(ip1, ip2, radius_background)
    imp1.show()
    imp2.show()
    IJ.log("Finding Peaks")
    ip1_1, ip2_1, peaks_1, peaks_2 = find_peaks(imp1, imp2, sigmaSmaller, sigmaLarger, minPeakValue)
    # Create a PointRoi from the DoG peaks, for visualization
    roi_1 = PointRoi(0, 0)
    roi_2 = PointRoi(0, 0)
    roi_3 = PointRoi(0, 0)
    roi_4 = PointRoi(0, 0)
    # A temporary array of integers, one per dimension the image has
    p_1 = zeros(ip1_1.numDimensions(), 'i')
    p_2 = zeros(ip2_1.numDimensions(), 'i')
    # Load every peak as a point in the PointRoi
    for peak in peaks_1:
        peak.localize(p_1)
        roi_1.addPoint(imp1, p_1[0], p_1[1])
    for peak in peaks_2:
        peak.localize(p_2)
        roi_2.addPoint(imp2, p_2[0], p_2[1])
    # glioma peaks that lie within min_dist (pixels) of some neuron peak
    for peak_1 in peaks_1:
        peak_1.localize(p_1)
        for peak_2 in peaks_2:
            peak_2.localize(p_2)
            d1 = distance(p_1, p_2)
            if d1 < min_dist:
                roi_3.addPoint(imp1, p_2[0], p_2[1])
                break
    # neuron peaks that lie within min_dist of some glioma peak
    for peak_2 in peaks_2:
        peak_2.localize(p_2)
        for peak_1 in peaks_1:
            peak_1.localize(p_1)
            d2 = distance(p_2, p_1)
            if d2 < min_dist:
                # add the matched NEURON point (was p_2, the glioma point,
                # which contradicted the "neurons touching glioma" label)
                roi_4.addPoint(imp1, p_1[0], p_1[1])
                break
    rm = RoiManager.getInstance()
    if not rm:
        rm = RoiManager()
    rm.reset()
    rm.addRoi(roi_1)
    rm.addRoi(roi_2)
    rm.addRoi(roi_3)
    rm.addRoi(roi_4)
    rm.select(0)
    rm.rename(0, "ROI neuron")
    rm.runCommand("Set Color", "yellow")
    rm.select(1)
    rm.rename(1, "ROI glioma")
    rm.runCommand("Set Color", "blue")
    rm.select(2)
    rm.rename(2, "ROI glioma touching neurons")
    rm.runCommand("Set Color", "red")
    rm.select(3)
    rm.rename(3, "ROI neurons touching glioma")
    rm.runCommand("Set Color", "green")
    rm.runCommand(imp1, "Show All")
    # Change distance to be in um for the report
    cal = imp.getCalibration()
    min_distance = str(round((cal.pixelWidth * min_dist),1))
    table = ResultsTable()
    table.incrementCounter()
    table.addValue("Numbers of Neuron Markers", roi_1.getCount(0))
    table.addValue("Numbers of Glioma Markers", roi_2.getCount(0))
    table.addValue("Numbers of Glioma within %s um of Neurons" %(min_distance), roi_3.getCount(0))
    table.addValue("Numbers of Neurons within %s um of Glioma" %(min_distance), roi_4.getCount(0))
    table.show("Results Analysis")