from ij.measure import ResultsTable
import os


def pixel_collector(rm, channel_imp, channel_name, impname, folder):
    # define a new Results table
    rt = ResultsTable()
    IndRois = rm.getIndexes()
    for index in IndRois:
        ROI = rm.getRoi(index)
        ROI_name = ROI.getName()
        coords = ROI.getContainedPoints()

        row = 0
        for pixel in coords:
            x_coord = pixel.getX()
            y_coord = pixel.getY()
            # one column pair per ROI: the pixel coordinates ...
            rt.setValue(ROI_name + "_X_pos", row, int(x_coord))
            rt.setValue(ROI_name + "_Y_pos", row, int(y_coord))
            # ... and the channel intensity at that pixel
            pixel_2 = channel_imp.getProcessor().getPixel(int(x_coord),
                                                          int(y_coord))
            rt.setValue(ROI_name + "_" + channel_name, row, pixel_2)
            row = row + 1

    rt.show("Results")
    rt.save(os.path.join(folder, impname + '_' + channel_name + "_pixels.csv"))
    print "Pixel collection done!"
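# Usage sketch for pixel_collector(), not part of the original script: the
# channel name and output folder below are hypothetical examples. It assumes
# ROIs were added to the ROI Manager beforehand.
from ij import IJ
from ij.plugin.frame import RoiManager

imp = IJ.getImage()                    # active single-channel image
rm = RoiManager.getInstance()          # ROI Manager populated by the user
if rm is not None and rm.getCount() > 0:
    pixel_collector(rm, imp, "C1", imp.getShortTitle(), "/tmp/out")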
from ij import IJ
from ij.measure import ResultsTable
import os


def run():
    global srcFile, ext, numberOfWidthMeasurements
    IJ.run("Set Measurements...",
           "area mean standard modal min centroid center perimeter bounding "
           "fit shape feret's integrated median skewness kurtosis "
           "area_fraction display redirect=None decimal=3")
    IJ.setForegroundColor(255, 255, 255)
    IJ.setBackgroundColor(0, 0, 0)
    IJ.run("Options...", "iterations=1 count=1 black")

    table = ResultsTable()
    srcDir = srcFile.getAbsolutePath()
    for root, directories, filenames in os.walk(srcDir):
        for filename in filenames:
            # Check for file extension
            if not filename.endswith(ext):
                continue
            # Check for file name pattern
            process(srcDir, root, filename, table, numberOfWidthMeasurements)
    table.save(os.path.join(srcDir, 'Results.xls'))
from ij import IJ, ImagePlus, ImageStack
from ij.gui import Roi
from ij.measure import Measurements, ResultsTable
from ij.plugin.filter import ParticleAnalyzer
import os


def process(self, imp):
    # extract nucleus channel, 8-bit and twice binned
    imp.setC(self.nucleusChannel)
    ip = imp.getChannelProcessor().duplicate()
    ip = ip.convertToByteProcessor()
    ip = ip.bin(4)
    nucleus = ImagePlus("nucleus_channel", ip)

    # threshold image and separate clumped nuclei
    IJ.run(nucleus, "Auto Threshold", "method=Otsu white setthreshold show")
    IJ.run(nucleus, "Make Binary", "thresholded remaining black")
    IJ.run(nucleus, "Watershed", "")

    # build a filesystem-safe export directory from the image title
    directory = imp.getTitle()
    directory = directory.replace(" ", "_")\
        .replace(",", "_")\
        .replace("#", "_series")\
        .replace("...", "")\
        .replace(".", "_")
    directory = os.path.join(self.exportDir, directory)
    sliceDirectory = os.path.join(directory, "slices")
    print directory
    print sliceDirectory
    if not os.path.exists(sliceDirectory):
        os.makedirs(sliceDirectory)

    # Create a table to store the results
    table = ResultsTable()
    # Create a hidden ROI manager, to store a ROI for each blob or cell
    # roim = RoiManager(True)

    # remove small particles and border particles
    pa = ParticleAnalyzer(
        ParticleAnalyzer.ADD_TO_MANAGER | ParticleAnalyzer.EXCLUDE_EDGE_PARTICLES,
        Measurements.CENTER_OF_MASS,
        table,
        self.minArea, self.maxArea,
        0.0, 1.0)

    if pa.analyze(nucleus):
        print "All ok, number of particles: ", table.size()
    else:
        print "There was a problem in analyzing", imp, nucleus
    table.save(os.path.join(directory, "rt.csv"))

    # read the center of mass coordinates
    cmx = table.getColumn(0)
    cmy = table.getColumn(1)

    if self.debug:
        imp.show()

    for i in range(0, min(self.nCells, table.size())):
        # ROI around the cell; scale the center of mass back to full
        # resolution (the nucleus image was binned 4x)
        cmx = table.getValue("XM", i)
        cmy = table.getValue("YM", i)
        x = 4 * cmx - (self.boxSize - 1) / 2
        y = 4 * cmy - (self.boxSize - 1) / 2
        if (x < self.edge or y < self.edge
                or x > imp.getWidth() - self.edge
                or y > imp.getHeight() - self.edge):
            continue

        roi = Roi(x, y, self.boxSize, self.boxSize)
        imp.setRoi(roi, False)

        cellStack = ImageStack(self.boxSize, self.boxSize)

        for z in range(1, imp.getNSlices() + 1):
            imp.setSlice(z)
            for c in range(1, imp.getNChannels() + 1):
                imp.setC(c)
                # copy ROI to stack
                imp.copy()
                impSlice = imp.getClipboard()
                cellStack.addSlice(impSlice.getProcessor())
                if self.slices:
                    sliceTitle = "cell_%s_z%s_c%s" % (str(i).zfill(4),
                                                      str(z).zfill(3), str(c))
                    print sliceTitle
                    IJ.saveAsTiff(impSlice,
                                  os.path.join(sliceDirectory, sliceTitle))
                impSlice.close()

        title = "cell_" + str(i).zfill(4)
        cell = ImagePlus(title, cellStack)

        # save ROI image
        IJ.saveAsTiff(cell, os.path.join(directory, title))
        cell.close()

        if self.debug:
            imp.updateAndDraw()
            # Wait is a helper dialog defined elsewhere in this project
            wait = Wait("particle done")
            wait.show()
from ij.measure import ResultsTable
import os


def saveresults(dir, name):
    outfile = os.path.join(dir, "{}.csv".format(name))
    # fetch the system-wide Results table, save it, then clear it
    res = ResultsTable.getResultsTable()
    ResultsTable.save(res, outfile)
    ResultsTable.reset(res)
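# Usage sketch for saveresults(), not from the original script. It saves and
# clears the shared table that "Analyze Particles..." (with the "display"
# option) writes into; the size filter and output path are hypothetical.
from ij import IJ

imp = IJ.getImage()
IJ.run(imp, "Analyze Particles...", "size=10-Infinity display")
saveresults("/tmp/out", imp.getShortTitle())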
from ij import IJ
from ij.measure import ResultsTable
from ij.plugin import ChannelSplitter
import os


def main():
    # Prepare directory tree for output.
    indir = IJ.getDirectory("input directory")
    outdir = IJ.getDirectory(".csv output directory")
    nucdir = os.path.join(outdir, "nuclei")
    bacdir = os.path.join(outdir, "bacteria")
    rufdir = os.path.join(outdir, "ruffles")
    gfpdir = os.path.join(outdir, "gfp")
    channelsdir = os.path.join(outdir, "channels")
    for directory in (nucdir, bacdir, rufdir, gfpdir, channelsdir):
        if not os.path.isdir(directory):
            os.mkdir(directory)

    # Collect all file paths in the input directory.
    files = readdirfiles(indir)

    nucResults = ResultsTable()
    bacResults = ResultsTable()
    rufResults = ResultsTable()
    gfpResults = ResultsTable()

    for file in files:
        if file.endswith('ome.tif') or file.endswith('ome.tiff'):
            imp = stackprocessor(file,
                                 nChannels=4,
                                 nSlices=7,
                                 nFrames=1)
            channels = ChannelSplitter.split(imp)
            name = imp.getTitle()
            IJ.log("Processing image: {}".format(name))

            # Save each channel, inverted and in grayscale, as .jpg.
            for c in range(len(channels)):
                IJ.run(channels[c], "Grays", "")
                IJ.run(channels[c], "Invert", "")
                jpgname = channels[c].getShortTitle()
                jpgoutfile = os.path.join(channelsdir,
                                          "{}.jpg".format(jpgname))
                IJ.saveAs(channels[c].flatten(), "Jpeg", jpgoutfile)
                IJ.run(channels[c], "Invert", "")

            nuc = countobjects(channels[0], nucResults,
                               threshMethod="Triangle",
                               subtractBackground=True,
                               # dilate=True,
                               watershed=True,
                               minSize=3.00,
                               maxSize=100,
                               minCirc=0.00,
                               maxCirc=1.00)
            bac = countobjects(channels[1], bacResults,
                               threshMethod="RenyiEntropy",
                               subtractBackground=False,
                               watershed=False,
                               minSize=0.20,
                               maxSize=30.00,
                               minCirc=0.00,
                               maxCirc=1.00)
            ruf = countobjects(channels[2], rufResults,
                               threshMethod="RenyiEntropy",
                               minSize=2.00,
                               maxSize=30.00,
                               minCirc=0.20,
                               maxCirc=1.00)
            gfp = countobjects(channels[3], gfpResults,
                               threshMethod="RenyiEntropy",
                               subtractBackground=False,
                               watershed=True,
                               minSize=0.20,
                               maxSize=30.00,
                               minCirc=0.00,
                               maxCirc=1.00)

            # binaries = [nuc, bac, ruf, gfp]
            # channels[0].show()
            # binaries[0].show()
            # binMontage = RGBStackMerge().mergeChannels(binaries, False)
            # binMontage.show()
            # chsMontage = RGBStackMerge().mergeChannels(channels, False)
            # binMontage = MontageMaker().makeMontage2(binMontage,
            #                                          4,     # int columns
            #                                          4,     # int rows
            #                                          1.00,  # double scale
            #                                          1,     # int first
            #                                          16,    # int last
            #                                          1,     # int inc
            #                                          0,     # int borderWidth
            #                                          False) # boolean labels
            # chsMontage = MontageMaker().makeMontage2(chsMontage,
            #                                          4,     # int columns
            #                                          4,     # int rows
            #                                          1.00,  # double scale
            #                                          1,     # int first
            #                                          16,    # int last
            #                                          1,     # int inc
            #                                          0,     # int borderWidth
            #                                          False) # boolean labels
            #
            # binMontage.show()
            # chsMontage.show()

            outfilenuc = os.path.join(nucdir, "threshold_nuc_{}".format(name))
            outfilebac = os.path.join(bacdir, "threshold_bac_{}".format(name))
            outfileruf = os.path.join(rufdir, "threshold_ruf_{}".format(name))
            outfilegfp = os.path.join(gfpdir, "threshold_gfp_{}".format(name))

            IJ.saveAs(nuc.flatten(), "Tiff", outfilenuc)
            IJ.saveAs(bac.flatten(), "Tiff", outfilebac)
            IJ.saveAs(ruf.flatten(), "Tiff", outfileruf)
            IJ.saveAs(gfp.flatten(), "Tiff", outfilegfp)

    nucResults.show("nuclei")
    bacResults.show("bacteria")
    rufResults.show("ruffles")
    gfpResults.show("gfp")

    nucout = os.path.join(outdir, "nuclei.csv")
    bacout = os.path.join(outdir, "bacteria.csv")
    rufout = os.path.join(outdir, "ruffles.csv")
    gfpout = os.path.join(outdir, "gfp.csv")
    ResultsTable.save(nucResults, nucout)
    ResultsTable.save(bacResults, bacout)
    ResultsTable.save(rufResults, rufout)
    ResultsTable.save(gfpResults, gfpout)
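# countobjects() above is a project helper whose definition is not shown in
# these excerpts. A minimal sketch of what such a helper could look like,
# assuming it thresholds one channel, optionally cleans it up, runs the
# particle analyzer into the given table and returns the binary image. The
# name, defaults and rolling-ball radius are assumptions for illustration.
from ij import IJ
from ij.measure import Measurements
from ij.plugin.filter import ParticleAnalyzer

def countobjects(imp, rt,
                 threshMethod="Otsu",
                 subtractBackground=False,
                 watershed=False,
                 minSize=0.00, maxSize=float("inf"),
                 minCirc=0.00, maxCirc=1.00):
    binary = imp.duplicate()
    if subtractBackground:
        IJ.run(binary, "Subtract Background...", "rolling=50")  # radius assumed
    IJ.run(binary, "Auto Threshold", "method=" + threshMethod)
    IJ.run(binary, "Convert to Mask", "")
    if watershed:
        IJ.run(binary, "Watershed", "")
    pa = ParticleAnalyzer(0,  # no display options
                          Measurements.AREA | Measurements.SHAPE_DESCRIPTORS,
                          rt, minSize, maxSize, minCirc, maxCirc)
    pa.analyze(binary)
    return binary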
from ij import IJ, ImagePlus
from ij.io import Opener
from ij.measure import ResultsTable
from ij.plugin import ChannelSplitter, ImageCalculator, ZProjector
import os


def main():
    # Prepare directory tree for output.
    indir = IJ.getDirectory("input directory")
    outdir = IJ.getDirectory(".csv output directory")
    c1dir = os.path.join(outdir, "Channel1")
    c2dir = os.path.join(outdir, "Channel2")
    c3dir = os.path.join(outdir, "Channel3")
    c4dir = os.path.join(outdir, "Channel4")
    channelsdir = os.path.join(outdir, "Channels")
    for directory in (c1dir, c2dir, c3dir, c4dir, channelsdir):
        if not os.path.isdir(directory):
            os.mkdir(directory)

    # Collect all file paths in the input directory.
    files = readdirfiles(indir)

    # Initialize the results tables.
    c1Results = ResultsTable()
    c2Results = ResultsTable()
    c3Results = ResultsTable()
    c4Results = ResultsTable()

    for file in files:
        IJ.log("File: {}/{}".format(files.index(file) + 1, len(files)))

        if file.endswith('.tif'):
            # Open the .tiff file as an ImagePlus and max-project it.
            imp = Opener().openImage(file)
            imp = ZProjector.run(imp, "max")
            # imp = stackprocessor(file,
            #                      nChannels=4,
            #                      nSlices=7,
            #                      nFrames=1)
            channels = ChannelSplitter.split(imp)
            name = imp.getTitle()

            # For every channel, save the inverted channel in grayscale as .jpg.
            for channel in channels:
                IJ.run(channel, "Grays", "")
                IJ.run(channel, "Invert", "")
                jpgname = channel.getShortTitle()
                jpgoutfile = os.path.join(channelsdir,
                                          "{}.jpg".format(jpgname))
                IJ.saveAs(channel.flatten(), "Jpeg", jpgoutfile)
                IJ.run(channel, "Invert", "")

            # OPTIONAL - Perform any other operations (e.g. cross-excitation
            # compensation tasks) before the object count.
            c2name = channels[2].getTitle()
            cal = channels[2].getCalibration()
            channels[2] = ImagePlus(
                c2name,
                # Dividing by channel 4 removes the AF647 bleed-through.
                ImageCalculator().run("divide create 32-bit",
                                      channels[2],
                                      channels[3]).getProcessor())
            channels[2].setCalibration(cal)

            # Settings for the channel1 threshold.
            c1 = countobjects(channels[0], c1Results,
                              threshMethod="Triangle",
                              subtractBackground=True,
                              watershed=True,
                              minSize=0.00,
                              maxSize=100,
                              minCirc=0.00,
                              maxCirc=1.00)
            # Settings for the channel2 threshold.
            c2 = countobjects(channels[1], c2Results,
                              threshMethod="RenyiEntropy",
                              subtractBackground=True,
                              watershed=False,
                              minSize=0.00,
                              maxSize=30.00,
                              minCirc=0.00,
                              maxCirc=1.00)
            # Settings for the channel3 threshold.
            c3 = countobjects(channels[2], c3Results,
                              threshMethod="RenyiEntropy",
                              subtractBackground=True,
                              watershed=False,
                              minSize=0.00,
                              maxSize=30.00,
                              minCirc=0.00,
                              maxCirc=1.00)
            # Settings for the channel4 threshold.
            c4 = countobjects(channels[3], c4Results,
                              threshMethod="RenyiEntropy",
                              subtractBackground=True,
                              watershed=False,
                              minSize=0.20,
                              maxSize=100.00,
                              minCirc=0.00,
                              maxCirc=1.00)

            # Format filenames for the thresholded .tiff files.
            outfileC1 = os.path.join(c1dir, "threshold_c1_{}".format(name))
            outfileC2 = os.path.join(c2dir, "threshold_c2_{}".format(name))
            outfileC3 = os.path.join(c3dir, "threshold_c3_{}".format(name))
            outfileC4 = os.path.join(c4dir, "threshold_c4_{}".format(name))

            # Save the thresholded .tiff files.
            IJ.saveAs(c1.flatten(), "Tiff", outfileC1)
            IJ.saveAs(c2.flatten(), "Tiff", outfileC2)
            IJ.saveAs(c3.flatten(), "Tiff", outfileC3)
            IJ.saveAs(c4.flatten(), "Tiff", outfileC4)

    # Show results tables.
    # c1Results.show("channel1")
    # c2Results.show("channel2")
    # c3Results.show("channel3")
    # c4Results.show("channel4")

    # Prepare results table filenames.
    c1out = os.path.join(outdir, "channel1.csv")
    c2out = os.path.join(outdir, "channel2.csv")
    c3out = os.path.join(outdir, "channel3.csv")
    c4out = os.path.join(outdir, "channel4.csv")

    # Save results tables.
    ResultsTable.save(c1Results, c1out)
    ResultsTable.save(c2Results, c2out)
    ResultsTable.save(c3Results, c3out)
    ResultsTable.save(c4Results, c4out)
from ij import IJ
from ij.gui import PointRoi
from ij.measure import ResultsTable
from ij.plugin import ZProjector
from jarray import zeros
from loci.plugins import BF
import os


def process(srcDir, dstDir, currentDir, fileName, keepDirectories,
            Channel_1, Channel_2, radius_background,
            sigmaSmaller, sigmaLarger, minPeakValue, min_dist):
    IJ.run("Close All", "")

    # Opening the image
    IJ.log("Open image file: " + fileName)
    # imp = IJ.openImage(os.path.join(currentDir, fileName))
    # imp = IJ.getImage()
    imp = BF.openImagePlus(os.path.join(currentDir, fileName))
    imp = imp[0]

    # getDimensions(width, height, channels, slices, frames)
    IJ.log("Computing Max Intensity Projection")
    if imp.getDimensions()[3] > 1:
        imp_max = ZProjector.run(imp, "max")
    else:
        imp_max = imp

    ip1, ip2 = extract_channel(imp_max, Channel_1, Channel_2)

    IJ.log("Subtract background")
    imp1, imp2 = back_substraction(ip1, ip2, radius_background)

    IJ.log("Finding Peaks")
    ip1_1, ip2_1, peaks_1, peaks_2 = find_peaks(imp1, imp2, sigmaSmaller,
                                                sigmaLarger, minPeakValue)

    # Create a PointRoi from the DoG peaks, for visualization
    roi_1 = PointRoi(0, 0)
    roi_2 = PointRoi(0, 0)
    roi_3 = PointRoi(0, 0)
    roi_4 = PointRoi(0, 0)

    # A temporary array of integers, one per dimension the image has
    p_1 = zeros(ip1_1.numDimensions(), 'i')
    p_2 = zeros(ip2_1.numDimensions(), 'i')

    # Load every peak as a point in the PointRoi
    for peak in peaks_1:
        # Read peak coordinates into an array of integers
        peak.localize(p_1)
        roi_1.addPoint(imp1, p_1[0], p_1[1])
    for peak in peaks_2:
        peak.localize(p_2)
        roi_2.addPoint(imp2, p_2[0], p_2[1])

    # Choose the minimum distance in pixels
    # min_dist = 20
    for peak_1 in peaks_1:
        peak_1.localize(p_1)
        for peak_2 in peaks_2:
            peak_2.localize(p_2)
            d1 = distance(p_1, p_2)
            if d1 < min_dist:
                roi_3.addPoint(imp1, p_2[0], p_2[1])
                break
    for peak_2 in peaks_2:
        peak_2.localize(p_2)
        for peak_1 in peaks_1:
            peak_1.localize(p_1)
            d2 = distance(p_2, p_1)
            if d2 < min_dist:
                roi_4.addPoint(imp1, p_2[0], p_2[1])
                break

    cal = imp.getCalibration()
    min_distance = str(round((cal.pixelWidth * min_dist), 1))

    table = ResultsTable()
    table.incrementCounter()
    table.addValue("Numbers of Neuron Markers", roi_1.getCount(0))
    table.addValue("Numbers of Glioma Markers", roi_2.getCount(0))
    table.addValue("Numbers of Glioma within %s um of Neurons" % min_distance,
                   roi_3.getCount(0))
    table.addValue("Numbers of Neurons within %s um of Glioma" % min_distance,
                   roi_4.getCount(0))
    # table.show("Results Analysis")

    saveDir = currentDir.replace(srcDir, dstDir) if keepDirectories else dstDir
    if not os.path.exists(saveDir):
        os.makedirs(saveDir)
    IJ.log("Saving to " + saveDir)
    table.save(os.path.join(saveDir, fileName + ".csv"))

    IJ.selectWindow("Log")
    # Save the log as .txt so it does not overwrite the .csv table above.
    IJ.saveAs("Text", os.path.join(saveDir, fileName + ".txt"))
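# distance() above is defined elsewhere in the script; a minimal sketch,
# assuming plain Euclidean distance between two pixel-coordinate arrays:
from math import sqrt

def distance(p, q):
    return sqrt(sum((pi - qi) ** 2 for pi, qi in zip(p, q)))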
        resultsTable.incrementCounter()
        resultsTable.addValue("Threshold", localBackground)
        resultsTable.addValue("Seed radius", seedRadius)
        resultsTable.addValue("GXY", gaussXY)
        resultsTable.addValue("GZ", gaussZ)
        # object counts binned by size
        resultsTable.addValue("TOTAL", results[i]['all'])
        resultsTable.addValue("0-250", results[i]['0'])
        resultsTable.addValue("251-500", results[i]['250'])
        resultsTable.addValue("501-750", results[i]['500'])
        resultsTable.addValue("751-1000", results[i]['750'])
        resultsTable.addValue("1001-1500", results[i]['1000'])
        resultsTable.addValue(">1500", results[i]['1500'])
        resultsTable.addValue("Skipped", results[i]['edge'])
    resultsTable.save(os.path.join(options['outputDir'], options['outputFile']))
else:
    parametersTable = ResultsTable()
    parametersTable.showRowNumbers(False)
    parameters = makeParameters(options)
    for i in range(0, len(parameters)):
        parametersTable.incrementCounter()
        parametersTable.addValue("Threshold", parameters[i]['localBackground'])
        parametersTable.addValue("Seed radius", parameters[i]['seedRadius'])
        parametersTable.addValue("GXY", parameters[i]['gaussXY'])
        parametersTable.addValue("GZ", parameters[i]['gaussZ'])
    parametersTable.save(options['paramsOut'])
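# makeParameters() is a project helper not shown in this excerpt. A minimal
# sketch, assuming the options dict maps each parameter name to a list of
# candidate values and the helper expands them into every combination:
import itertools

def makeParameters(options):
    keys = ('localBackground', 'seedRadius', 'gaussXY', 'gaussZ')
    combos = itertools.product(*[options[k] for k in keys])
    return [dict(zip(keys, combo)) for combo in combos]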
# consol.setValue("foci_count", currcell,
#                 int(consol.getValue("foci_count", currcell)) + 1)

IJ.selectWindow("Results")
IJ.run("Close")

# tally cells that are in range, and how many foci each of them has
for count in range(consol.size()):
    inrange = consol.getValue("is_in_range", count)
    if inrange == 1:
        count_range = count_range + 1
        foci = int(consol.getValue("foci_count", count))
        if foci <= 3:
            cellsperfoci_range[foci] = cellsperfoci_range[foci] + 1
        else:
            cellsperfoci_range["more"] = cellsperfoci_range["more"] + 1

# save the summary results table
consol.save(os.path.join(directory, filename + "_summary.csv"))

# reset the ROI Manager, close it and go to next file (if there is one)
rm.runCommand("Reset")
rm.close()

if any(x.endswith(ext) for x in filenamelist[counter]):
    fp = open(os.path.join(srcDir, "total_summary.csv"), "w")
    fp.write("total cells, " + str(countcells) + "\n\n")
    if linechannel > 0:
        fp.write("cells in range, " + str(count_range) + "\n")
        fp.write("cells with foci and in range, " +
                 str(cellsperfoci_range[1] + cellsperfoci_range[2] +
                     cellsperfoci_range[3]) + "\n\n")
        for i in range(4):
            fp.write("cells with " + str(i) + " foci and in range, " +
                     str(cellsperfoci_range[i]) + "\n")
from ij.measure import ResultsTable as RT


def main():
    rt = RT.open2(table_file.getAbsolutePath())
    if not rt:
        return

    log(" --- --- --- ")
    log("Loaded %s" % table_file.getAbsolutePath())
    log("Loading column lists...")

    # Get column indices from imported file
    headings = getColumnHeadings(rt)
    id_col = getColumnIndex(headings, "TID")
    t_col = getColumnIndex(headings, "t [")
    d2p_col = getColumnIndex(headings, "D2P [")
    angle_col = getColumnIndex(headings, u'\u03B1 [deg]')
    delta_col = getColumnIndex(headings, u'\u0394\u03B1 [deg]')
    if angle_col == RT.COLUMN_NOT_FOUND:
        log("Failed to detect index for angle column. Re-trying...")
        angle_col = getColumnIndex(headings, u'? [deg]')
    if delta_col == RT.COLUMN_NOT_FOUND:
        log("Failed to detect index for delta angle column. Re-trying...")
        delta_col = getColumnIndex(headings, u'?? [deg]')
    log("Last column index is %s" % rt.getLastColumn())

    if RT.COLUMN_NOT_FOUND in (id_col, d2p_col, delta_col, angle_col):
        uiservice.showDialog("Error: Some key columns were not found!",
                             "Invalid Table?")
        return

    log("Settings: BOUT_WINDOW= %s, MIN_D2P= %s, DEF_FRAME_INTERVAL= %s"
        % (BOUT_WINDOW, '{0:.4f}'.format(MIN_D2P), DEF_FRAME_INTERVAL))

    # Store all data on dedicated lists
    track_id_rows = rt.getColumnAsDoubles(id_col)
    d2p_rows = rt.getColumnAsDoubles(d2p_col)
    angle_rows = rt.getColumnAsDoubles(angle_col)
    delta_rows = rt.getColumnAsDoubles(delta_col)
    t_rows = rt.getColumnAsDoubles(t_col)

    # Assess n of data points and extract unique path ids
    n_rows = len(track_id_rows)
    row_indices = range(n_rows)
    track_ids = set(track_id_rows)
    n_tracks = len(track_ids)
    log("Table has %g rows" % n_rows)
    log("Table has %g tracks" % n_tracks)

    log("Parsing tracks...")
    for track_id in track_ids:
        for row, next_row in zip(row_indices, row_indices[1:]):
            if track_id_rows[row] != track_id:
                continue

            if not isNumber(angle_rows[row]):
                rt.setValue("FLAG", row, "NA")
                continue

            lower_bound = max(0, row - BOUT_WINDOW + 1)
            upper_bound = min(n_rows - 1, row + BOUT_WINDOW)
            win_d2p = []
            for _ in range(lower_bound, upper_bound):
                win_d2p.append(d2p_rows[row])

            if sum(win_d2p) <= MIN_D2P * len(win_d2p):
                rt.setValue("FLAG", row, 0)
            else:
                current_angle = angle_rows[row]
                next_angle = angle_rows[next_row]
                current_delta = delta_rows[row]

                flag = -1 if current_angle < 0 else 1
                delta_change = (abs(current_delta) > 90)
                same_sign = ((current_angle < 0) == (next_angle < 0))
                if delta_change and not same_sign:
                    flag *= -1

                rt.setValue("FLAG", row, flag)
                if next_row == n_rows - 1:
                    rt.setValue("FLAG", next_row, flag)

    if rt.save(table_file.getAbsolutePath()):
        log("Processed table successfully saved (file overwritten)")
    else:
        log("Could not override input file. Displaying it...")
        rt.show(table_file.name)

    log("Creating onset table...")
    onset_rt = RT()
    onset_rt.showRowNumbers(False)

    frame_int = DEF_FRAME_INTERVAL
    if "table" in frame_rate_detection:
        frame_int = getFrameIntervalFromTable(row_indices, track_id_rows,
                                              t_rows)
    elif "image" in frame_rate_detection:
        frame_int = getFrameIntervalFromImage(image_file.getAbsolutePath())
    else:
        log("Using default frame rate")

    for track_id in track_ids:
        for prev_row, row in zip(row_indices, row_indices[1:]):
            if track_id not in (track_id_rows[prev_row], track_id_rows[row]):
                continue
            flag = rt.getValue("FLAG", row)
            if not isNumber(flag):
                continue
            flag = int(flag)
            if flag == 0:
                continue
            if flag == 1 or flag == -1:
                srow = onset_rt.getCounter()
                onset_rt.incrementCounter()
                onset_rt.setValue("TID", srow, track_id)
                from_frame = int(t_rows[prev_row] / frame_int) + 1
                to_frame = int(t_rows[row] / frame_int) + 1
                onset_rt.setValue("First disp. [t]", srow,
                                  "%s to %s" % (t_rows[prev_row], t_rows[row]))
                onset_rt.setValue("First disp. [frames]", srow,
                                  "%s to %s" % (from_frame, to_frame))
                onset_rt.setValue("ManualTag", srow, "")
                break

    out_path = suffixed_path(table_file.getAbsolutePath(), "ManualTagging")
    if onset_rt.save(out_path):
        log("Summary table successfully saved: %s" % out_path)
    else:
        log("File not saved... Displaying onset table")
        onset_rt.show("Onsets %s" % table_file.name)
img410PA.show()
img470PA.show()

# MORPHOLOGICAL MEASUREMENTS
# These are done on the rotated image mask
scales = {
    "4x4": 2.58,
    "2x2": 1.29
}

morphTable = ResultsTable()
PA.setResultsTable(morphTable)

maskPA.show()
IJ.run(maskPA, "Select None", "")
IJ.run(maskPA, "Set Scale...",
       "distance=1 known=" + str(scales[binning]) + " pixel=1 unit=μm")  # TODO scale?
IJ.run(maskPA, "Set Measurements...",
       "area centroid center perimeter bounding fit shape feret's median "
       "skewness kurtosis scientific redirect=None decimal=7")
IJ.run(maskPA, "Analyze Particles...",
       "size=0-Infinity circularity=0.00-1.00 show=Nothing stack")

morph_headings = morphTable.getColumnHeadings().strip(' ').split('\t')
for m_heading in morph_headings[1:]:  # because of formatting, the first is empty
    addValues(dataTable, m_heading,
              [morphTable.getValue(m_heading, i) for i in range(morphTable.size())])

for imPlus in [img410PA, img470PA, maskPA]:
    cropStack(imPlus)
    IJ.saveAsTiff(imPlus, os.path.join(PARENT_DIR, imPlus.getTitle()))

dataTable.show("Measurements")
dataTable.save(os.path.join(PARENT_DIR, 'measurements.csv'))
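# addValues() and cropStack() are project helpers not shown in this excerpt.
# A minimal sketch of addValues(), assuming it appends one named column of
# values to an ij.measure.ResultsTable:
def addValues(table, heading, values):
    for i, v in enumerate(values):
        table.setValue(heading, i, v)  # row == counter grows the table by one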
import math

# Parse spots to insert values as objects
for trackID in trackIDs:
    track = model.getTrackModel().trackSpots(trackID)

    # Sort by frame
    sortedTrack = list(track)
    sortedTrack.sort(key=lambda s: s.getFeature("FRAME"))

    for spot in sortedTrack:
        results.incrementCounter()
        results.addValue(ID_COLUMN, "" + str(spot.ID()))
        # results.addValue(CELL_LABEL_COLUMN,
        #                  str(int(spot.getFeature("MAX_INTENSITY"))))
        results.addValue(CELL_LABEL_COLUMN, spot.getName())
        results.addValue(TRACK_ID_COLUMN, "" + str(trackID))
        for feature in FEATURES:
            val = spot.getFeature(feature)
            if math.isnan(val):
                results.addValue(feature.lower(), "None")
            else:
                results.addValue(feature.lower(), "" + str(int(val)))

        # collect the IDs of spots this spot descends from
        parents = []
        children = []
        for edge in model.getTrackModel().edgesOf(spot):
            source = model.getTrackModel().getEdgeSource(edge)
            target = model.getTrackModel().getEdgeTarget(edge)
            if source != spot:
                parents.append(source.ID())
        results.addValue("parent_ids", str(parents))

results.save(output_path)
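# The names used by the loop above (model, trackIDs, results, the *_COLUMN
# constants and FEATURES) are prepared earlier in the script. A sketch of a
# plausible setup, assuming a finished TrackMate run held in `trackmate`;
# the column names and feature list are illustrative assumptions:
from ij.measure import ResultsTable

ID_COLUMN = "spot_id"
TRACK_ID_COLUMN = "track_id"
CELL_LABEL_COLUMN = "label"
FEATURES = ["POSITION_X", "POSITION_Y", "POSITION_T"]

results = ResultsTable()
model = trackmate.getModel()                     # fiji.plugin.trackmate.Model
trackIDs = model.getTrackModel().trackIDs(True)  # True = visible tracks only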