def test_maskArea():
    """ Test of the function maskArea """
    # 8-bit test
    imp = IJ.openImage(filePathTest.getAbsolutePath())
    ip = imp.getProcessor().duplicate()
    impMask = mask(ip, 1, ip.maxValue())
    impMask.show()
    outMaskArea = maskArea(impMask.getProcessor())
    outArea = area(impMask.getProcessor())
    print(outMaskArea)
    print(outArea)
    print(outMaskArea / outArea)

    # 16-bit test
    imp = IJ.openImage(filePath.getAbsolutePath())
    stack = imp.getStack()
    ip = stack.getProcessor(1).duplicate()
    sizeZ = imp.getStackSize()
    impMask = mask(ip, 1, ip.maxValue())
    impMask.show()
    outMaskArea = maskArea(impMask.getProcessor())
    outArea = area(impMask.getProcessor())
    print(outMaskArea)
    print(outArea)
    print(outMaskArea / outArea)
def findSeptum(root, show, pos, n=4):
    from ij import IJ
    corrImg = IJ.openImage(root + "/%s_SegAnalysis/%s/CorrelationImage.tif" % (bf_prefix, pos))
    IJ.run(corrImg, "8-bit", "")
    # estimate_width and extend_line parameters can also be added
    cmd = "line_width=10 high_contrast=250 low_contrast=50 show_junction_points show_ids add_to_manager make_binary method_for_overlap_resolution=NONE sigma=3 lower_threshold=0 upper_threshold=1.36 minimum_line_length=30 maximum=60"
    if show:
        cmd += " displayresults"
    IJ.run(corrImg, "Ridge Detection", cmd)
    binarylineImg = IJ.getImage()
    IJ.run(binarylineImg, "Invert", "")
    binaryImg = IJ.openImage(root + "/%s_SegAnalysis/%s/BinaryImage.tif" % (bf_prefix, pos))
    binaryImg.show()
    IJ.run("Add Image...", "x=0 y=0 opacity=100 zero")
    binaryImg.hide()
    binarylineImg.hide()
    imp2 = binaryImg.flatten()
    IJ.run(imp2, "8-bit", "")
    for i in range(n):
        IJ.run(imp2, "Erode", "")
    for j in range(n):
        IJ.run(imp2, "Dilate", "")
    IJ.saveAsTiff(imp2, d.getPath() + "/%s_SegAnalysis/%s/BinaryImage_with_sep.tif" % (bf_prefix, pos))
def run():
    srcDir = DirectoryChooser("Choose!").getDirectory()
    if not srcDir:
        # user canceled dialog
        return
    # Assumes all files have the same size
    filepaths = []
    pattern = re.compile('ch1(.*)_(.*)transformed.mha')
    for root, directories, filenames in os.walk(srcDir):
        for filename in filenames:
            # Skip files that do not match the expected .mha pattern
            match = re.search(pattern, filename)
            if (match == None) or (match.group(1) == None):
                continue
            print(filename)
            path = os.path.join(root, filename)
            filepaths.append(path)
    # Upon finding the first image, initialize the ImageStack
    vs = None
    sorted_filepaths = sorted(filepaths)
    for f in sorted_filepaths:
        imp = IJ.openImage(f)  # openImage returns the ImagePlus without displaying it
        print(f.split('\\')[-1])
        if vs is None:
            vs = ImageStack(imp.width, imp.height)
        # Add a slice, named after the file
        vs.addSlice(f.split('\\')[-1], imp.getProcessor())
        imp.hide()
    ImagePlus("Stack from subdirectories", vs).show()
def z_stack_opener(path, numberOfSlices=1, file_name_string="%s-Site_%s_%s_z%s.tif", file_name_variables=[]):
    """
    Opens a series of tifs into a stack.

    args:
        path -- path to files
        numberOfSlices -- the number of slices to open (minimum 1)
        file_name_string -- string with %s formatting spaces, z must be last
        file_name_variables -- list of variables to fill in the %s spaces

    returns:
        z_stack -- ImageJ ImageStack of processors
        paths -- list of paths to the files that were opened
    """
    paths = []
    full_file_name_string = os.path.join(path, file_name_string)
    # open the first slice
    all_name_variables = tuple(file_name_variables + ["1"])
    img = IJ.openImage(full_file_name_string % all_name_variables)
    # create the stack and add the first slice
    z_stack = ImageStack(img.width, img.height)
    z_stack.addSlice(file_name_string % all_name_variables, img.getProcessor())
    paths.append(full_file_name_string % all_name_variables)
    # open the other slices
    for i in range(2, int(numberOfSlices) + 1):
        sliceNameVariables = tuple(file_name_variables + [str(i)])
        img = IJ.openImage(full_file_name_string % sliceNameVariables)
        z_stack.addSlice(file_name_string % sliceNameVariables, img.getProcessor())
        paths.append(full_file_name_string % sliceNameVariables)
    return z_stack, paths
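# A minimal usage sketch for z_stack_opener (assumed, not from the source): the folder,
# the slice count and the file_name_variables below are hypothetical placeholders that
# must match the %s fields of the default file_name_string (well, site, channel, z).
from ij import ImagePlus

z_stack, opened_paths = z_stack_opener(
    "/path/to/images",                       # hypothetical image folder
    numberOfSlices=5,                        # opens z1..z5
    file_name_variables=["A01", "1", "w1"])  # fills "%s-Site_%s_%s_z%s.tif"
ImagePlus("reassembled z-stack", z_stack).show()
print(opened_paths)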
def estimate_scale_multiplier(acquisition_path, resized_path):
    resized_image = IJ.openImage(resized_path)
    resized_dim = resized_image.getDimensions()
    resized_image.close()
    acquisition_image = IJ.openImage(acquisition_path)
    acquisition_dim = acquisition_image.getDimensions()
    acquisition_image.close()
    if (resized_dim[0] == resized_dim[1]) and (acquisition_dim[0] == acquisition_dim[1]):
        scale = float(acquisition_dim[0]) / float(resized_dim[0])
    else:
        scale = -1
    return [scale, resized_dim[0], acquisition_dim[0]]
def estimate_scale_multiplier(acquisition_path, resized_path):
    resized_image = IJ.openImage(resized_path)
    resized_dim = resized_image.getDimensions()
    resized_image.close()
    acquisition_image = IJ.openImage(acquisition_path)
    acquisition_dim = acquisition_image.getDimensions()
    cal = acquisition_image.getCalibration()
    acquisition_image.close()
    if (resized_dim[0] == resized_dim[1]) and (acquisition_dim[0] == acquisition_dim[1]):
        scale = float(acquisition_dim[0]) / float(resized_dim[0])
    else:
        scale = -1
    return [scale, resized_dim[0], acquisition_dim[0],
            cal.pixelWidth, cal.pixelHeight, cal.pixelDepth, cal.getUnit()]
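# Usage sketch for estimate_scale_multiplier (the two paths are hypothetical placeholders).
# Both images are expected to be square; a scale of -1 signals that the check failed.
result = estimate_scale_multiplier("/data/acquisition.tif", "/data/resized.tif")
scale, resized_width, acquisition_width = result[0], result[1], result[2]
pixel_width, pixel_height, pixel_depth, unit = result[3], result[4], result[5], result[6]
if scale > 0:
    print("scale factor %s, pixel size %s %s" % (scale, pixel_width, unit))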
def run():
    sId = IJ.getString("Filenames contain:", "T0000")
    srcDir = DirectoryChooser("Choose!").getDirectory()
    if not srcDir:
        # user canceled dialog
        return
    # Assumes all files have the same size
    stack = None
    for root, directories, filenames in os.walk(srcDir):
        for filename in filenames:
            # Skip files whose name does not contain the filter string
            if not (sId in filename):
                continue
            print(filename)
            path = os.path.join(root, filename)
            # Upon finding the first image, initialize the stack
            imp = IJ.openImage(path)
            if stack is None:
                # stack = VirtualStack(imp.width, imp.height, None, srcDir)
                stack = ImageStack(imp.width, imp.height)
            # Add a slice to the virtual stack, relative to the srcDir
            #stack.addSlice(path[len(srcDir):])
            # Add a slice to the real stack
            stack.addSlice(filename, imp.getProcessor())
    # Make an ImagePlus from the stack
    ImagePlus("Stack from subdirectories", stack).show()
def receive_image():
    print "receiving an image ....."
    the_data = request.data
    fh = open("imageToSave.jpg", "wb")
    fh.write(the_data)
    fh.close()
    imp = IJ.openImage("C:/Users/Lynn/Documents/GitHub/server/imageToSave.jpg")
    IJ.run(imp, "8-bit", "")
    IJ.run(imp, "Subtract Background...", "rolling=100 light disable")
    IJ.setAutoThreshold(imp, "Default")
    IJ.setAutoThreshold(imp, "Triangle")
    IJ.setThreshold(imp, 0, 237)
    #Macro.setOptions("BlackBackground")
    IJ.run(imp, "Convert to Mask", "")
    IJ.run(imp, "Convert to Mask", "")
    IJ.run(imp, "Make Binary", "")
    IJ.run(imp, "Fill Holes", "")
    IJ.run(imp, "Watershed", "")
    #IJ.run(imp, "Analyze Particles...", "size=2000-20000 display clear summarize add")
    IJ.run(imp, "Analyze Particles...",
           "size=6000-18000 circularity=0.07-0.25 show=[Overlay Outlines] display clear summarize")
    imp.show()
    FileSaver(imp).saveAsJpeg("C:/Users/Lynn/Documents/GitHub/server/imageToSave3.jpg")
    with open("imageToSave3.jpg", "rb") as imageFile:
        to_be_sent_str = imageFile.read()
    #print imp
    test = bytearray(to_be_sent_str)
    print "done"
    #RoiManager.runCommand("Show All with labels")
    #RoiManager.runCommand("Show All")
    #return "other"
    return test
def run():
    printLog("=====ZY_CreatTemp_V2====", 0)
    # Prompt user to open an image
    od = OpenDialog("Choose image file", None)
    if od is None:
        msgStr = "User canceled"
        printLog(msgStr, 1)
    else:
        sourceFolder = od.getDirectory()
        fileName = od.getFileName()
        imp = IJ.openImage(sourceFolder + fileName)
        imp.show()
        n = imp.getNSlices()
        printLog("Processing source file: " + fileName, 1)
        imp2 = pickSlice(imp)
        if imp2:
            destFolder = os.path.join(sourceFolder, 'Temps')
            #outName = os.path.join(destFolder, fileName[:-4] + '_temp.tif')  # remove the .tif in filename
            outName = os.path.join(destFolder, 'temp.tif')
            # check or make the folder
            if not os.path.isdir(destFolder):
                os.makedirs(destFolder)
            # make temp
            dupNSlice(imp2, n - 1)
            printLog("Saving to: " + outName, 1)
            fs = FileSaver(imp2)
            fs.saveAsTiffStack(outName)
            imp2.close()
        imp.close()
        msgStr = "ZY_CreatTemp_V2.py is Done."
        printLog(msgStr, 0)
def myrun(self):
    imp = IJ.openImage(self.path)  # open imp
    if imp is None:
        print 'ERROR opening file:', self.path
        return 0
    numSlices = imp.getNSlices()
    if numSlices < 2:
        return 0
    middleSlice = int(math.floor(imp.getNSlices() / 2))  # int() is necessary here
    imp.show()
    imp.setSlice(middleSlice)
    impTitle = imp.getTitle()
    impWin = WindowManager.getWindow(impTitle)  # returns java.awt.Window
    transformationFile = os.path.basename(self.path)
    transformationFile = os.path.splitext(transformationFile)[0] + '.txt'
    transformationFile = '/Users/cudmore/Desktop/out/' + transformationFile
    stackRegParams = 'stack_1=[%s] action_1=Align file_1=[%s] stack_2=None action_2=Ignore file_2=[] transformation=[Rigid Body] save' % (impWin, transformationFile)
    IJ.run('MultiStackReg', stackRegParams)
    imp.close()
    '''
    #20150723, we just aligned on a cropped copy, apply alignment to original imp
    origImpTitle = imp.getTitle()
    stackRegParams = 'stack_1=[%s] action_1=[Load Transformation File] file_1=[%s] stack_2=None action_2=Ignore file_2=[] transformation=[Rigid Body]' % (origImpTitle, transformationFile)
    IJ.run('MultiStackReg', stackRegParams)
    '''
def batch_open_images(directory, file_type=None, name_filter=None, recursive=False):
    '''Open all files in the given folder.
    :param directory: The path from where to open the images. String and java.io.File are allowed.
    :param file_type: Only accept files with the given extension (default: None).
    :param name_filter: Only accept files that contain the given string (default: None).
    :param recursive: Process directories recursively (default: False).
    '''
    if isinstance(directory, File):
        directory = directory.getAbsolutePath()
    path_to_images = []
    directory = os.path.expanduser(directory)
    directory = os.path.expandvars(directory)
    if not recursive:
        for file_name in os.listdir(directory):
            full_path = os.path.join(directory, file_name)
            if os.path.isfile(full_path) \
               and check_type(file_name, file_type) \
               and check_filter(file_name, name_filter):
                path_to_images.append(full_path)
    else:
        for directory, _, file_names in os.walk(directory):
            for file_name in file_names:
                full_path = os.path.join(directory, file_name)
                if check_type(file_name, file_type) \
                   and check_filter(file_name, name_filter):
                    path_to_images.append(full_path)
    image_list = []
    for img_path in path_to_images:
        imp = IJ.openImage(img_path)
        if imp:
            image_list.append(imp)
    return image_list
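# Usage sketch for batch_open_images (the directory is a hypothetical placeholder;
# check_type and check_filter are helpers assumed to be defined elsewhere in this script).
images = batch_open_images("/path/to/experiment",
                           file_type="tif",
                           name_filter="GFP",
                           recursive=True)
for imp in images:
    print(imp.getTitle(), imp.getWidth(), imp.getHeight())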
def measure_growth(imgDir, filename="Fiji_Growth.txt"):
    """ Collects measurement data in pixels and writes to a file.
    Uses straightened binary images"""
    f = open(imgDir + filename, 'w')
    f.write("Img number\tEnd point (pixels)\n")
    IJ.run("Set Measurements...", "area mean min center redirect=None decimal=3")
    index = "000000000"
    filename = imgDir + "/binary" + "/img_" + index + "__000-padded.tif"
    while path.exists(filename):
        imp = IJ.openImage(filename)
        imp.show()
        IJ.run("Clear Results")
        for i in xrange(800):  # hard coded to target length for now
            IJ.makeRectangle(i, 0, 1, 80)
            IJ.run("Measure")
            table = RT.getResultsTable()
            #print "i:", i, "counter:", table.getCounter()
            maxi = RT.getValue(table, "Max", i)
            if maxi == 0:
                f.write(str(int(index)) + "\t" + str(i) + "\n")
                break
        IJ.runMacro("while (nImages>0) {selectImage(nImages);close();}")
        index = to_9_Digits(str(int(index) + 1))
        filename = imgDir + "/padded" + "/img_" + index + "__000-padded.tif"
    f.close()
def test_VOP():
    # 16-bit test
    imp = IJ.openImage(filePath.getAbsolutePath())
    stack = imp.getStack()
    ip1 = stack.getProcessor(1).duplicate()
    ip2 = stack.getProcessor(2).duplicate()
    sizeZ = imp.getStackSize()
    t1 = ip1.getAutoThreshold() / 2
    impMask1 = mask(ip1, t1, ip1.maxValue())
    t2 = ip2.getAutoThreshold() / 2
    impMask2 = mask(ip2, t2, ip2.maxValue())
    cleanMask(impMask1)
    cleanMask(impMask2)
    impMask1.show()
    MaskArea1 = maskArea(impMask1.getProcessor())
    impMask2.show()
    MaskArea2 = maskArea(impMask2.getProcessor())
    w = Wand(impMask1.getProcessor())
    #print(w.allPoints())
    round(impMask1.getProcessor().getWidth() / 2.0)
    # w.autoOutline(round(impMask1.getProcessor().getWidth() / 2.0), round(impMask1.getProcessor().getHeight() / 2.0))
    # print(w.xpoints())
    # print(w.ypoints())
    totArea = area(impMask1.getProcessor())
    print("MaskArea1: ", MaskArea1)
    print("MaskArea2: ", MaskArea2)
    print("Total Area: ", totArea)
    mIntersection = maskIntersection(impMask1.getProcessor(), impMask2.getProcessor())
    ImagePlus('intersection', mIntersection).show()
    return mIntersection
def test_HD():
    imp = IJ.openImage(filePath.getAbsolutePath())
    stack = imp.getStack()
    ip1 = stack.getProcessor(1).duplicate()
    ip2 = stack.getProcessor(2).duplicate()
    [ip1, s] = scaleLongSide(ip1, longSide)
    if (Scale == 1):
        ip2 = scale(ip2, s)
    maskA = impMask1.duplicate()
    maskB = impMask2.duplicate()
    # IJ.run(maskA, "Invert", "")
    # IJ.run(maskB, "Invert", "")
    IJ.run(maskA, "Outline", "")
    IJ.run(maskB, "Outline", "")
    # IJ.run(maskA, "Invert", "")
    # IJ.run(maskB, "Invert", "")
    maskA.show()
    maskB.show()
    impMask1.show()
    MaskArea1 = maskArea(impMask1.getProcessor())
    impMask2.show()
    MaskArea2 = maskArea(impMask2.getProcessor())
    hd = Hausdorff_Distance()
    hd.exec(maskA, maskB)
    print(hd.getAveragedHausdorffDistance())
    print(hd.getHausdorffDistance())
def run():
    srcDir = DirectoryChooser("Choose!").getDirectory()
    if not srcDir:
        # user canceled dialog
        return
    for root, directories, filenames in os.walk(srcDir):
        for filename in filenames:
            # Skip non-TIFF files
            if not filename.endswith(fileID):
                continue
            path = os.path.join(root, filename)
            print path
            imp = IJ.openImage(path)
            imp.show()
            if method == "contrast":
                IJ.run(imp, "Enhance Contrast", "saturated=0.35")
                time.sleep(0.2)
                IJ.save(imp, path + "_auto.jpeg")
                imp.changes = False
                imp.getWindow().close()
            elif method == "scale":
                IJ.run(imp, "Scale...", "x=0.2 y=0.2 interpolation=None create title=small")
                impScale = IJ.getImage()
                time.sleep(0.2)
                IJ.save(impScale, path + "_thumb.jpeg")
                imp.changes = False
                imp.getWindow().close()
                impScale.changes = False
                impScale.getWindow().close()
def SaveCoverFromFs(tiles, newwidth, newheight, cols, rows):
    tilewidth = int(newwidth / cols)
    tileheight = int(newheight / rows)
    newwidth = int(newwidth / tilewidth) * tilewidth
    newheight = int(newheight / tileheight) * tileheight
    hiresoutip = ColorProcessor(newwidth, newheight)
    hiresout = ImagePlus("hi res output", hiresoutip)
    hiresout.show()
    x = 0
    y = -1
    plane = []
    # scale the images
    for i in sorted(tiles.iterkeys()):
        if y < rows - 1:
            y += 1
        else:
            y = 0
            x += 1
        imp = IJ.openImage(str(tiles[i]))
        scale = Scale(imp.getProcessor())
        ipscaled = ScaleImageToSize(scale, tilewidth, tileheight)
        hiresoutip.copyBits(ipscaled, x * tilewidth, y * tileheight, 0)
        hiresout.draw()
def test_mask():
    """ Test of the function: mask(ip, valueA, valueB) """
    imp = IJ.openImage(filePathTest.getAbsolutePath())
    ip = imp.getProcessor().duplicate()
    valueA = 0
    valueB = 1
    mask(ip, valueA, valueB).show()
def run():
    print '======================'
    print 'running'
    if len(sys.argv) < 2:
        print 'bAlignCrop will align a stack.'
        print 'Usage: ./fiji'
        filePath = '/Users/cudmore/Desktop/X20150214_a156_010_ch1_c.tif'
        #return
    else:
        filePath = sys.argv[1]
    # check that file exists
    if not os.path.isfile(filePath):
        print 'Error: did not find file ', filePath
        return
    # open file
    print 'opening file:', filePath
    imp = IJ.openImage(filePath)
    if imp is None:
        print 'Error opening file'
        return
    imp.show()
    winTitle = imp.getTitle()
    transformationFile = filePath + '.txt'
    print 'running multistack reg'
    #IJ.run(imp, "MultiStackReg", "stack_1=X20150214_a156_010_ch1_c.tif action_1=Align file_1=[] stack_2=None action_2=Ignore file_2=[] transformation=Translation save")
    stackRegParams = 'stack_1=%s action_1=[Align] file_1=[%s] transformation=[Translation] save' % (winTitle, transformationFile)
    IJ.run('MultiStackReg', stackRegParams)
def ChooseImageFile(image_type):
    od = OpenDialog("Choose %s image" % image_type, None)
    file_name = od.getFileName()
    dir_name = od.getDirectory()
    full_path = os.path.join(dir_name, file_name)
    print("Opening %s" % full_path)
    imp = IJ.openImage(full_path)
    return imp
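# Usage sketch for ChooseImageFile: prompt for two images and show them.
# The "DAPI"/"GFP" labels are hypothetical placeholders, not from the source.
dapi_imp = ChooseImageFile("DAPI")
gfp_imp = ChooseImageFile("GFP")
if dapi_imp is not None and gfp_imp is not None:
    dapi_imp.show()
    gfp_imp.show()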
def test_VOP2():
    imp = IJ.openImage(filePath.getAbsolutePath())
    stack = imp.getStack()
    ip1 = stack.getProcessor(1).duplicate()
    ip2 = stack.getProcessor(2).duplicate()
    [si, s1, s2] = VOP(ip1, ip2)
    print(si, s1, s2)
    return
def test_VOP():
    imp = IJ.openImage(filePath.getAbsolutePath())
    stack = imp.getStack()
    ip1 = stack.getProcessor(1).duplicate()
    ip2 = stack.getProcessor(2).duplicate()
    mask1 = maskIP(ip1)
    mask2 = maskIP(ip2)
    [si, s1, s2] = VOP(mask1, mask2)
    print(si, s1, s2)
def test_maskIP():
    imp = IJ.openImage(filePath.getAbsolutePath())
    stack = imp.getStack()
    ip1 = stack.getProcessor(1).duplicate()
    ip2 = stack.getProcessor(2).duplicate()
    mask1 = maskIP(ip1)
    mask2 = maskIP(ip2)
    ImagePlus('mask1', mask1).show()
    ImagePlus('mask2', mask2).show()
def test():
    imp = IJ.openImage(fname)
    ip = imp.getProcessor()
    RankFilters().rank(ip, 2, RankFilters.MIN)
    IJ.run("Analyze Particles...", "size=500-50000 circularity=0.40-1.00 show=Masks")
    imgActive = IJ.getImage()
    ipActive = imgActive.getProcessor().convertToFloat()
    imgActive.show()
def test_maskIntersection():
    """ Test of the function: maskIntersection(ip1, ip2) """
    # load the input stack as an ImagePlus
    imp = IJ.openImage(filePath.getAbsolutePath())
    stack = imp.getStack()
    sizeZ = imp.getStackSize()
    ip1 = stack.getProcessor(1).duplicate()
    ip2 = stack.getProcessor(2).duplicate()
    ImagePlus('ip1', ip1).show()
    ImagePlus('ip2', ip2).show()
    ImagePlus('intersection', maskIntersection(ip1, ip2)).show()
def run(): """ Loads an image which contains ROIs, converts the ROIs - to Evotec ROIs as an xml file, and/or - an svg image TODO: error with reading imagej Roi zip files """ ### Converting: svg --> ROI path = r'<svg width="14976" height="7616" xmlns="http://www.w3.org/2000/svg"><path id="1334669" parent_id="1334668" order="0" structure_id="8" d="M9467.215,5791.594 c82.746,153.9-39.498,279.637-145.297,374.731c-100.779,90.594-181.132,201.04-275.882,297.501 c-194.346,197.852-446.452,346.122-715.326,415.339c-197.956,50.952-375.339,57.515-574.739,10.292 c-163.894-38.817-238.032,23.184-374.617,96.708c-63.755,34.327-258.936,135.235-273.279,1.944 c-7.254-67.693,85.139-157.956,131.84-198.115c-75.233-21.777-179.232,63.946-259.551,73.706 c-107.278,13.026-218.306-4.761-326.646,7.151c-117.271,12.89-237.231,34.839-352.385,60.732 c-172.447,38.776-302.694,170.854-484.64,176.229c-184.149,5.439-355.878-49.775-514.428-140.981 c-109.53-63.024-206.267-143.981-286.415-241.022c-90.444-109.505-186.259-225.456-306.194-304.368 c-114.881-75.587-263.275-101.035-395.37-130.314c-210.799-46.727-428.626-63.547-643.334-35.954 c-211.209,27.146-412.149,76.014-626.047,80.668c-189.976,4.136-361.405-45.697-540.055-107.974 c-313.864-109.416-592.335-297.609-864.036-483.233c-168.544-115.146-305.872-300.74-461.981-435.467 c-170.078-146.796-346.666-292.801-464.156-487.593c-205.159-340.133,168.338-865.886,403.364-1091.214 c334.837-321.021,795.461-620.786,1270.673-652.714c203.342-13.663,451.505,36.074,586.741,199.88 c-133.226-249.919-140.221-425.897,39.538-633.947c504.592-584.002,1198.894-907.949,1923.306-1131.087 c860.56-265.083,1703.133-608.751,2591.13-775.18c388.287-72.771,785.628-112.479,1180.868-92.317 c160.411,8.182,304.905,36.956,471.539,62.145c196.454,29.697,414.285,60.732,588.465,139.16 c178.249,80.277,465.341,346.427,298.729,563.811c206.914-5.754,415.695,85.503,596.221,178.214 c62.759,32.229,130.516,63.325,201.264,71.23c112.855,12.605,207.703-84.423,312.921-110.243 c136.787-33.571,266.943-22.791,402.228,10.816c112.344-107.092,319.473-73.14,456.787-44.478 c353.834,73.854,692.672,90.21,1005.524,291.487c193.171,124.282,431.306,278.311,558.702,473.092 c42.081,64.333,61.586,130.708,109.786,189.1c132.437,160.473,311.515,311.912,333.839,533.115 c8.01,79.385-5.186,96.765,36.088,168.686c102.621,178.82,152.508,340.341,170.857,549.14 c14.027,159.611,124.551,239.911,158.223,409.915c21.908,110.606,49.25,226.635,47.013,324.01 c-3.304,143.779-49.269,419.993-174.464,441.982c96.516,21.854,220.154,37.618,304.754,90.247 c199.037,123.819,336.75,329.159,266.479,560.134c-103.856,341.34-337.229,610.193-560.424,880.842 c-183.617,222.651-410.791,430.394-677.031,550.955c-680.221,308.015-1326.973-5.006-2024.568,11.531 c-299.873,7.105-517.482-16.721-786.898-142.876c-118.914-55.688-215.523-84.235-345.721-105.013 c-251.304-40.1-443.702-203.442-577.426-413.314c-54.688-85.833-50.135-193.289-129.498-265.938 c-70.75-64.768-44.326-194.624,20.599-266.901c50.116-55.776,127.097-135.258,24.871-184.88 c-72.841-35.353-159.812-19.962-234.234,2.568c-42.647,12.912-99.439,29.135-126.929,67.372 C9404.609,5727.341,9444.971,5750.238,9467.215,5791.594z" style="stroke:black;fill:#bfdae3"/><path id="171453751" parent_id="1140195" order="5" structure_id="667" d="M2815.212,2659.0 c-41.795,92.794-60.946,187.111-62.889,281.487l449.915,18.282l-1.688-4.649L2815.212,2659.989z" style="stroke:black;fill:#268f45"/></svg>' node = pathToRoi(path) print(node) svgToRois(xmlPath) ### Converting: ImagePlus overlay --> svg # load the ImagePlus which contains an overlay imp = 
IJ.openImage(filePath.getAbsolutePath()) # Extract the ROI list [rois, w, h] = roisFromImagePlusOverlay(imp) # Convert the ROIs to an svg string text = roisToSVG(rois, w, h) # Save the svg file writeTextFile(text, saveDir, 'svg', 'rois') ### Converting: ROI file --> svg # load a ROI file rois = roisFromFile(roiPath.getAbsolutePath()) text = roisToSVG(rois, w, h) # Save the svg file writeTextFile(text, saveDir, 'svg', 'roisFromFile') ### Converting: ImagePlus overlay --> svg ### Converting: Evotec region --> ROI ### Converting: ROI --> Evotec region # Extract the ROI list [rois, w, h] = roisFromImagePlusOverlay(imp) # Convert the ROIs to an svg string Xlt = 0; Ylt = 0; Xrb = w; Yrb = h; text = roiToEvotecRegion(rois[0], w, h, Xlt, Ylt, Xrb, Yrb) # Save the svg file writeTextFile(text, saveDir, 'txt', 'rois_Evotec') [text1, text2] = pathCoordsToRoi(svgToRois(xmlPath)) writeTextFile(text1, saveDir, 'txt', 'coords1') writeTextFile(text2, saveDir, 'txt', 'coords2')
def reload_cropped_16bit(self, list_of_16bits):
    for image_file in list_of_16bits:
        well_site = image_file.split("_")[0]
        channel = image_file.split("_")[1]
        x = image_file.split("_")[2]
        y = image_file.split("_")[3][:-4]
        self.name = well_site
        full_path = os.path.join(self.data_dir, image_file)
        img = IJ.openImage(full_path)
        self.channel_names.append(channel)
        self.cropped_imps[channel] = img
        self.x = x
        self.y = y
def applyFilter(stackName):
    imp = IJ.openImage(stackName)
    stack = imp.getImageStack()
    for i in xrange(1, imp.getNSlices() + 1):
        image = ImagePlus(str(i), stack.getProcessor(i))
        IJ.run(image, "Auto Threshold", "method=Li white")
        #IJ.run(image, "Analyze Particles...", "size=1000-Infinity circularity=0.00-1.00 show=Masks in_situ")
    imp2 = ImagePlus("Threshold", stack)
    fs = FileSaver(imp2)
    print "Saving filtered stack"
    fs.saveAsTiff(stackName[:-4] + "-filtered(Li).tif")
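# Usage sketch for applyFilter (the stack path is a hypothetical placeholder).
# The Li-thresholded copy is written next to the input with a "-filtered(Li).tif" suffix.
applyFilter("/data/timelapse/embryo_stack.tif")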
def prepareNewImage(imgData, direction=None):
    if direction == 'prev':
        # if not imgData.hasPrev():
        #     print 'end! back to first'
        imgPath = imgData.prev()
    elif direction == 'unmarked':
        imgPath = imgData.nextUnmarked()
    else:
        # if not imgData.hasNext():
        #     print 'end! to end'
        imgPath = imgData.next()
    imp = IJ.openImage(imgPath)
    try:
        newUnit = pmWin.unitText.getText()
        newPixelSize = float(pmWin.pixelSizeText.getText())
    except ValueError:
        pass
    else:
        cal = imp.getCalibration()
        cal.setUnit(newUnit)
        cal.pixelWidth = newPixelSize
        cal.pixelHeight = cal.pixelWidth
    imp.show()
    pointsTable = imgData.table
    if os.path.basename(imgPath) in pointsTable:
        pts = pointsTable[os.path.basename(imgPath)]
        if len(pts) >= 6:  # at least 3 points (i.e. 6 coords)
            imgPoints = map(float, pts[:6])
            drawLines(imp, imgPoints)
    lEnd = ListenToDrawEnd()
    lAdd = ListenToPointAdd()
    win = imp.getWindow()
    if win:
        # override key listeners
        wkls = win.getKeyListeners()
        map(win.removeKeyListener, wkls)
        win.addKeyListener(lEnd)
        map(win.addKeyListener, wkls)
        canvas = win.getCanvas()
        ckls = canvas.getKeyListeners()
        map(canvas.removeKeyListener, ckls)
        canvas.addKeyListener(lEnd)
        map(canvas.addKeyListener, ckls)
        canvas.addMouseListener(lAdd)
def measure(self):
    imp = IJ.openImage(self.filename)
    IJ.log("Input file: %s" % self.filename)
    ImageConverter(imp).convertToGray8()
    res = Auto_Threshold().exec(imp, self.myMethod, self.noWhite, self.noBlack,
                                self.doIwhite, self.doIset, self.doIlog,
                                self.doIstackHistogram)
    rt = ResultsTable()
    rt.showRowNumbers(False)
    pa = PA(self.options, PA.AREA + PA.PERIMETER + PA.CIRCULARITY, rt,
            self.MINSIZE, self.MAXSIZE)
    pa.analyze(imp)
    self.result = self.rtToResult(rt)
    self.mask = imp
def buildStack(saveDir, stacklist):
    stacks = {}
    for f, s in stacklist.iteritems():
        for name in s:
            curSlice = IJ.openImage(os.path.join(saveDir, name))
            if f not in stacks:
                stack = curSlice.createEmptyStack()
                stack.addSlice(name, curSlice.getProcessor())
                stacks[f] = stack
            else:
                stacks[f].addSlice(name, curSlice.getProcessor())
            call(["/usr/local/bin/rmtrash", os.path.join(saveDir, name)])
    for f in stacks:
        IJ.saveAsTiff(ImagePlus(f, stacks[f]), os.path.join(saveDir, f))
# initiate RoiManager
RM = RoiManager()
rm = RM.getRoiManager()
rm.runCommand("Associate", "true")
rm.runCommand("Show All with labels")

# for each open image..
for image in image_titles:
    imp = WindowManager.getImage(image)
    baseName = os.path.splitext(image)[0]  # get the name without extension
    roiFile = baseName + "_ROISet.zip"  # make the name for the ROI set file
    roiFilePath = os.path.join(roiDir, roiFile)  # make the path for the ROI set file
    print(roiFilePath)
    rm.runCommand("Open", roiFilePath)  # load the saved ROI set into the manager
    rm.runCommand("Select All")  # select all rois
    rm.runCommand(imp, "Measure")  # measure all rois
    rm.runCommand(imp, "Show All")  # show all rois
    imp2 = imp.flatten()  # create a flattened key with rois and labels
    imp2.setTitle("labels_" + baseName + ".tif")
    imp2.show()
    rm.runCommand("Select All")
    rm.runCommand("Delete")  # delete all rois in manager
    imp.close()
    rm.runCommand(imp, "Show None")
    windowName = imp2.getTitle()
    flatPath = os.path.join(roiDir, windowName)
from script.imglib.math import Compute, Add, Subtract
from script.imglib.color import HSB, Hue, Saturation, Brightness
from script.imglib import ImgLib
from ij import IJ

# Obtain an image
img = ImgLib.wrap(IJ.openImage("https://imagej.nih.gov/ij/images/clown.jpg"))

# Obtain a new clown, whose hue has been shifted by half,
# with the same saturation and brightness as the original
bluey = Compute.inRGBA(HSB(Add(Hue(img), 0.5), Saturation(img), Brightness(img)))

print type(Hue(img)), type(Saturation(img)), type(Brightness(img))

ImgLib.wrap(Compute.inFloats(Hue(img))).show()
ImgLib.wrap(Compute.inFloats(Saturation(img))).show()
ImgLib.wrap(Compute.inFloats(Brightness(img))).show()
ImgLib.wrap(bluey).show()
dstFile = srcFile[:-4] + "_out.tif"
srcpath = os.path.join(workDir, srcFile)
dstpath = os.path.join(workDir, dstFile)

# function: thresholding
def threshold(p):
    if p > th[i][0] and p < th[i][1]:
        return p
    else:
        return 0.0

# open image (RGB)
imp = IJ.openImage(srcpath)

# convert
ImageConverter(imp).convertToLab()

## imageProcessor
ip = imp.getProcessor()

# FloatProcessor by channel
chpx = []      # channel pixels
filtered = []  # filtered FloatProcessor
for i in range(3):
    chpx.append(ip.toFloat(i, None).getPixels())
    _filtered = [threshold(p) for p in chpx[i]]  # filtered pixels
    filtered.append(FloatProcessor(ip.width, ip.height, _filtered, None))
def process(srcDir, dstDir, currentDir, fileName, keepDirectories): print "Processing:" # Opening the image print "Open image file", fileName imp = IJ.openImage(os.path.join(currentDir, fileName)) #Here we make sure the calibration are correct units = "pixel" TimeUnit = "unit" newCal = Calibration() newCal.pixelWidth = 1 newCal.pixelHeight = 1 newCal.frameInterval = 1 newCal.setXUnit(units) newCal.setYUnit(units) newCal.setTimeUnit(TimeUnit) imp.setCalibration(newCal) cal = imp.getCalibration() dims = imp.getDimensions() # default order: XYCZT if (dims[4] == 1): imp.setDimensions(1, 1, dims[3]) # Start the tracking model = Model() #Read the image calibration model.setPhysicalUnits(cal.getUnit(), cal.getTimeUnit()) # Send all messages to ImageJ log window. model.setLogger(Logger.IJ_LOGGER) settings = Settings() settings.setFrom(imp) # Configure detector - We use the Strings for the keys # Configure detector - We use the Strings for the keys settings.detectorFactory = DownsampleLogDetectorFactory() settings.detectorSettings = { DetectorKeys.KEY_RADIUS: 2., DetectorKeys.KEY_DOWNSAMPLE_FACTOR: 2, DetectorKeys.KEY_THRESHOLD: 1., } print(settings.detectorSettings) # Configure spot filters - Classical filter on quality filter1 = FeatureFilter('QUALITY', 0, True) settings.addSpotFilter(filter1) # Configure tracker - We want to allow merges and fusions settings.trackerFactory = SparseLAPTrackerFactory() settings.trackerSettings = LAPUtils.getDefaultLAPSettingsMap( ) # almost good enough settings.trackerSettings['LINKING_MAX_DISTANCE'] = LINKING_MAX_DISTANCE settings.trackerSettings['ALLOW_TRACK_SPLITTING'] = ALLOW_TRACK_SPLITTING settings.trackerSettings['SPLITTING_MAX_DISTANCE'] = SPLITTING_MAX_DISTANCE settings.trackerSettings['ALLOW_TRACK_MERGING'] = ALLOW_TRACK_MERGING settings.trackerSettings['MERGING_MAX_DISTANCE'] = MERGING_MAX_DISTANCE settings.trackerSettings[ 'GAP_CLOSING_MAX_DISTANCE'] = GAP_CLOSING_MAX_DISTANCE settings.trackerSettings['MAX_FRAME_GAP'] = MAX_FRAME_GAP # Configure track analyzers - Later on we want to filter out tracks # based on their displacement, so we need to state that we want # track displacement to be calculated. By default, out of the GUI, # not features are calculated. # The displacement feature is provided by the TrackDurationAnalyzer. settings.addTrackAnalyzer(TrackDurationAnalyzer()) settings.addTrackAnalyzer(TrackSpeedStatisticsAnalyzer()) filter2 = FeatureFilter('TRACK_DISPLACEMENT', 10, True) settings.addTrackFilter(filter2) #------------------- # Instantiate plugin #------------------- trackmate = TrackMate(model, settings) #-------- # Process #-------- ok = trackmate.checkInput() if not ok: sys.exit(str(trackmate.getErrorMessage())) ok = trackmate.process() if not ok: sys.exit(str(trackmate.getErrorMessage())) #---------------- # Display results #---------------- if showtracks: model.getLogger().log('Found ' + str(model.getTrackModel().nTracks(True)) + ' tracks.') selectionModel = SelectionModel(model) displayer = HyperStackDisplayer(model, selectionModel, imp) displayer.render() displayer.refresh() # The feature model, that stores edge and track features. 
fm = model.getFeatureModel() with open(dstDir + fileName + 'tracks_properties.csv', "w") as file: writer1 = csv.writer(file) writer1.writerow([ "track #", "TRACK_MEAN_SPEED (micrometer.secs)", "TRACK_MAX_SPEED (micrometer.secs)", "NUMBER_SPLITS", "TRACK_DURATION (secs)", "TRACK_DISPLACEMENT (micrometer)" ]) with open(dstDir + fileName + 'spots_properties.csv', "w") as trackfile: writer2 = csv.writer(trackfile) #writer2.writerow(["spot ID","POSITION_X","POSITION_Y","Track ID", "FRAME"]) writer2.writerow( ["Tracking ID", "Timepoint", "Time (secs)", "X pos", "Y pos"]) for id in model.getTrackModel().trackIDs(True): # Fetch the track feature from the feature model. v = (fm.getTrackFeature(id, 'TRACK_MEAN_SPEED') * Pixel_calibration) / Time_interval ms = (fm.getTrackFeature(id, 'TRACK_MAX_SPEED') * Pixel_calibration) / Time_interval s = fm.getTrackFeature(id, 'NUMBER_SPLITS') d = fm.getTrackFeature(id, 'TRACK_DURATION') * Time_interval e = fm.getTrackFeature( id, 'TRACK_DISPLACEMENT') * Pixel_calibration model.getLogger().log('') model.getLogger().log('Track ' + str(id) + ': mean velocity = ' + str(v) + ' ' + model.getSpaceUnits() + '/' + model.getTimeUnits()) track = model.getTrackModel().trackSpots(id) writer1.writerow( [str(id), str(v), str(ms), str(s), str(d), str(e)]) for spot in track: sid = spot.ID() x = spot.getFeature('POSITION_X') y = spot.getFeature('POSITION_Y') z = spot.getFeature('TRACK_ID') t = spot.getFeature('FRAME') time = int(t) * int(Time_interval) writer2.writerow( [str(id), str(t), str(time), str(x), str(y)])
def getPixel():
    # Get pixel size from the user with a dialog box. Could return a python dict or a custom class?
    # Note the user can return a NaN if they do not enter a numeric value.
    gd = GenericDialog("Pixel")
    gd.addNumericField("Pixel size desired (mm)", 6.4, 1)  # default is 1 decimal
    gd.showDialog()
    Pixel_size = gd.getNextNumber()  # 6.4  # ENTER mm, remember tolerance is +/-30%
    return Pixel_size

######### open image using dialogue box
#imp = IJ.getImage()
original = IJ.openImage(getFile())
original.show()

########## Use thresholding and selection to define UFOV ##########
#IJ.run("ROI Manager...", "")  # not sure if I need this
IJ.setRawThreshold(original, 1, 255, '')  # background pixels have value 0. See IMAGE>ADJUST>THRESHOLD
IJ.run(original, "Create Selection", "")  # add bounding box. See EDIT>SELECTION
IJ.run(original, "To Bounding Box", "")   # this box defines the UFOV. See EDIT>SELECTION
IJ.resetThreshold(original)  # get back the original now that the UFOV is defined
UFOV = Duplicator().run(
https://imagej.net/Jython_Scripting_Examples
"""
from ij import IJ
from ij.plugin.filter import EDM, ParticleAnalyzer
from ij.process import ImageProcessor
from ij.measure import ResultsTable
from ij.plugin.frame import RoiManager
from ij.measure import Measurements
from java.lang import Double

IJ.run("Close All")

# 1 - Obtain an image
blobs = IJ.openImage("http://imagej.net/images/blobs.gif")

# Make a copy with the same properties as the blobs image:
imp = blobs.createImagePlus()
ip = blobs.getProcessor().duplicate()
imp.setProcessor("blobs copy", ip)
imp_work = imp.duplicate()
imp_work.setTitle("Work")
ip_work = imp_work.getProcessor()

# 2 - Apply a threshold: only zeros and ones
# Set the desired threshold range (here a single value, 147)
ip_work.setThreshold(147, 147, ImageProcessor.NO_LUT_UPDATE)
# Call the Thresholder to convert the image to a mask
IJ.run(imp_work, "Convert to Mask", "")
from clearcl import ClearCLImage
from net.imglib2.img.display.imagej import ImageJFunctions
from net.imglib2.type.numeric.integer import UnsignedShortType
from net.imglib2.view import Views
from ij import IJ
from java.lang import Float
from net.haesleinhuepf.clij import CLIJ
from net.haesleinhuepf.clij.kernels import Kernels
import os
import inspect

# retrieve the folder where this script is located
# (thanks to @mountain_man from the ImageJ forum)
filesPath = os.path.dirname(os.path.abspath(inspect.getsourcefile(lambda: 0))) + "/"

# open the example 3D data set
imp = IJ.openImage("http://imagej.nih.gov/ij/images/t1-head.zip")

# initialize ClearCL context and convenience layer
clij = CLIJ.getInstance()

# convert the image to CL images (ready for the GPU)
inputCLImage = clij.convert(imp, ClearCLImage)
tempCLImage = clij.createCLImage(
    [inputCLImage.getWidth(), inputCLImage.getHeight()],
    inputCLImage.getChannelDataType())
outputCLImage = clij.createCLImage(
    [inputCLImage.getWidth(), inputCLImage.getHeight()],
    inputCLImage.getChannelDataType())

# crop out a center plane of the 3D data set
Kernels.copySlice(clij, inputCLImage, tempCLImage, 64)
def computeRegistration(): while atomicI.get() < nSections: k = atomicI.getAndIncrement() if k < nSections: l = k IJ.log('Computing EM/LM registration for layer ' + str(l).zfill(4)) layerFolder = fc.mkdir_p( os.path.join(registrationFolder, 'layer_' + str(l).zfill(4))) toRegisterFolder = fc.mkdir_p( os.path.join(layerFolder, 'toRegister')) registeredFolder = fc.mkdir_p( os.path.join(layerFolder, 'registered')) # Applying appropriate filters to make lowresEM and LM look similar for layer l imLM = IJ.openImage(imPaths['LM'][l]) imLM = fc.localContrast(imLM) imLMPath = os.path.join(toRegisterFolder, 'imLM_' + str(l).zfill(4) + '.tif') IJ.save(imLM, imLMPath) imEM = IJ.openImage(imPaths['EM'][l]) imEM = fc.localContrast(imEM) imEMPath = os.path.join(toRegisterFolder, 'imEM_' + str(l).zfill(4) + '.tif') IJ.save(imEM, imEMPath) # Compute first a simple affine registration on the non-cropped images IJ.log( 'Computing affine and moving least squares alignment for layer ' + str(l).zfill(4)) firstStepRegistered = False # registration at first step with 1step/octave (less features) pLowRes = getSIFTMatchingParameters(nOctaves[0], 1.6, 16, 4000, 8, 4) featuresLM = getFeatures(imLMPath, pLowRes) featuresEM = getFeatures(imEMPath, pLowRes) matchingResults = getMatchingResults(featuresLM, featuresEM) if matchingResults is None: IJ.log( 'No registration matching at low resolution matching step 1 in layer ' + str(l).zfill(4)) else: model, inliers = matchingResults distance = PointMatch.meanDistance( inliers ) # mean displacement of the remaining matching features IJ.log('---Layer ' + str(l).zfill(4) + ' distance ' + str(distance) + ' px with ' + str(len(inliers)) + ' inliers') if distance > matchingThreshold[0]: IJ.log( 'Matching accuracy is lower than the threshold at the low resolution step 1 - ' + str(l).zfill(4) + ' - distance - ' + str(distance)) else: affTransform = model.createAffine() s1, s2 = getScalingFactors(affTransform) IJ.log('Layer ' + str(l).zfill(4) + ' scaling factors - step 1 - ' + str(s1) + ' - ' + str(s2) + '--' + str(s1 * s2) + ' affDeterminant ' + str(affTransform.getDeterminant()) + ' nInliers ' + str(len(inliers))) if (abs(s1 - 1) < 0.2) and ( abs(s2 - 1) < 0.2 ): # scaling in both directions should be close to 1 IJ.log('First step ok - layer ' + str(l).zfill(4)) firstStepRegistered = True loaderZ.serialize( affTransform, os.path.join(registeredFolder, 'affineSerialized')) if not firstStepRegistered: IJ.log( 'First step registration in layer ' + str(l).zfill(4) + ' with few features has failed. Trying with more features.' 
) # registration at first step with 3steps/octave (more features) # pLowRes = getSIFTMatchingParameters(3, 1.6, 64, 4000, 8, 4) pLowRes = getSIFTMatchingParameters(nOctaves[0], 1.6, 16, 4000, 8, 4) # for BIB featuresLM = getFeatures(imLMPath, pLowRes) featuresEM = getFeatures(imEMPath, pLowRes) matchingResults = getMatchingResults(featuresLM, featuresEM) if matchingResults is None: IJ.log( 'No registration matching at low resolution matching step 1bis in layer ' + str(l).zfill(4)) else: model, inliers = matchingResults distance = PointMatch.meanDistance( inliers ) # mean displacement of the remaining matching features IJ.log('---Layer ' + str(l).zfill(4) + ' distance ' + str(distance) + ' px with ' + str(len(inliers)) + ' inliers') if distance > matchingThreshold[0]: IJ.log( 'Matching accuracy is lower than the threshold at the high resolution step 1bis - ' + str(l).zfill(4) + ' - distance - ' + str(distance)) else: affTransform = model.createAffine() s1, s2 = getScalingFactors(affTransform) IJ.log('Layer ' + str(l).zfill(4) + ' scaling factors - step 1bis - ' + str(s1) + ' - ' + str(s2) + '--' + str(s1 * s2) + ' affDeterminant ' + str(affTransform.getDeterminant()) + ' nInliers ' + str(len(inliers))) if (abs(s1 - 1) < 0.2) and ( abs(s2 - 1) < 0.2 ): # scaling in both directions should be close to 1 IJ.log('First step 1bis ok - layer ' + str(l).zfill(4)) firstStepRegistered = True loaderZ.serialize( affTransform, os.path.join(registeredFolder, 'affineSerialized')) if not firstStepRegistered: IJ.log('The two first step trials in layer ' + str(l).zfill(4) + ' have failed') else: # Affine transform and crop the LM, and compute a high res MLS matching with lock: # only one trakem working at a time # apply affTransform patch = Patch.createPatch(pZ, imLMPath) layerZ.add(patch) patch.setAffineTransform(affTransform) patch.updateBucket() # crop and export bb = Rectangle(0, 0, widthEM, heightEM) affineCroppedIm = loaderZ.getFlatImage( layerZ, bb, 1, 0x7fffffff, ImagePlus.GRAY8, Patch, layerZ.getAll(Patch), True, Color.black, None) affineCroppedImPath = os.path.join( toRegisterFolder, 'affineCroppedLM_' + str(l).zfill(4) + '.tif') IJ.save(affineCroppedIm, affineCroppedImPath) affineCroppedIm.close() layerZ.remove(patch) layerZ.recreateBuckets() pHighRes = getSIFTMatchingParameters(nOctaves[1], 1.6, 64, 4096, 8, 4) featuresLM = getFeatures(affineCroppedImPath, pHighRes) featuresEM = getFeatures(imEMPath, pHighRes) # get the MLS matchingResults = getMatchingResults(featuresLM, featuresEM) if matchingResults is None: IJ.log( 'It cannot be, there should be a good match given that an affine was computed. 
Layer ' + str(l).zfill(4)) else: model, inliers = matchingResults affTransform = model.createAffine() s1, s2 = getScalingFactors(affTransform) IJ.log('Second step determinant - layer ' + str(l).zfill(4) + ' - determinant - ' + str(affTransform.getDeterminant()) + ' nInliers ' + str(len(inliers)) + 'Scaling factors - step 2 - ' + str(s1) + ' - ' + str(s2)) if (abs(s1 - 1) < 0.2) and (abs(s2 - 1) < 0.2) and len( inliers ) > 50: # scaling in both directions should be close to 1 distance = PointMatch.meanDistance( inliers ) # mean displacement of the remaining matching features if distance > matchingThreshold[1]: IJ.log( 'Weird: matching accuracy is lower than the threshold at the high resolution step 2 - ' + str(l).zfill(4) + ' - distance - ' + str(distance)) else: mlst = MovingLeastSquaresTransform() mlst.setModel(AffineModel2D) mlst.setAlpha(1) mlst.setMatches(inliers) xmlMlst = mlst.toXML('\t') with open( os.path.join(registeredFolder, 'MLST.xml'), 'w') as f: f.write(xmlMlst) loaderZ.serialize( mlst, os.path.join(registeredFolder, 'mlstSerialized')) registrationStats.append( [l, distance, len(inliers)])
imPaths['EM'] = [
    os.path.join(exportedEMFolder, imageName)
    for imageName in fc.naturalSort(os.listdir(exportedEMFolder))
    if os.path.splitext(imageName)[1] == '.tif'
]
imPaths['LM'] = [
    os.path.join(exportedLMFolder, imageName)
    for imageName in fc.naturalSort(os.listdir(exportedLMFolder))
    if os.path.splitext(imageName)[1] == '.tif'
]
# surfaceIds = [0,16,32,48,65,81,97,113,129,145,162,179,195,211,227,243,260,276,293,310]  # optimal 16-17
# imPaths['EM'] = [imPaths['EM'][i] for i in surfaceIds]

# get the dimensions of the EM layerset by looking at the dimensions of the first EM image;
# saved for the next script
imEM0 = IJ.openImage(imPaths['EM'][0])
widthEM = imEM0.width
heightEM = imEM0.height
imEM0.close()

f = open(os.path.join(registrationFolder, 'lowResEMBounds'), 'w')
pickle.dump([widthEM, heightEM], f)
f.close()

registrationStatsPath = os.path.join(registrationFolder, 'registrationStats')
registrationStats = []

# create a dummy TrakEM project for applying the affine and cropping the LM
# in the first registration step
pZ, loaderZ, layersetZ, nLayersZ = fc.getProjectUtils(fc.initTrakem(temporaryFolder, 1))
layersetZ.setDimensions(0, 0, widthEM * 5, heightEM * 5)
layerZ = layersetZ.getLayers().get(0)
import fiji.plugin.trackmate.features.spot.SpotContrastAndSNRAnalyzerFactory as SpotContrastAndSNRAnalyzerFactory
import fiji.plugin.trackmate.action.ExportStatsToIJAction as ExportStatsToIJAction
import fiji.plugin.trackmate.io.TmXmlReader as TmXmlReader
import fiji.plugin.trackmate.action.ExportTracksToXML as ExportTracksToXML
import fiji.plugin.trackmate.io.TmXmlWriter as TmXmlWriter
import fiji.plugin.trackmate.features.ModelFeatureUpdater as ModelFeatureUpdater
import fiji.plugin.trackmate.features.SpotFeatureCalculator as SpotFeatureCalculator
import fiji.plugin.trackmate.features.spot.SpotContrastAndSNRAnalyzer as SpotContrastAndSNRAnalyzer
import fiji.plugin.trackmate.features.spot.SpotIntensityAnalyzerFactory as SpotIntensityAnalyzerFactory
import fiji.plugin.trackmate.features.track.TrackSpeedStatisticsAnalyzer as TrackSpeedStatisticsAnalyzer
import fiji.plugin.trackmate.util.TMUtils as TMUtils

# Get currently selected image
#imp = WindowManager.getCurrentImage()
imp = IJ.openImage('http://fiji.sc/samples/FakeTracks.tif')
#imp.show()

#-------------------------
# Instantiate model object
#-------------------------
model = Model()

# Set logger
model.setLogger(Logger.IJ_LOGGER)

#------------------------
# Prepare settings object
#------------------------
""" make_mandrill_256.py Make a cropped Mandrill Image Date Who What ---------- --- ---------------------------------------------------- 2019-06-16 JRM Initial adaptation. Use the HOME environment variable to get the start of the path """ from ij import IJ import os home_dir = os.getenv("HOME") print(home_dir) IJ.run("Close All") imp = IJ.openImage(home_dir + "/dat/images/key-test/mandrill.tif"); imp.setRoi(0,0,256,256); imp.show() IJ.run("Crop") imp.show() IJ.saveAs(imp, "Tiff", home_dir + "/Documents/git/tips/ImageJ/tif/mandrill_256.tif");
def call(self):
    return IJ.openImage(self.filepath)
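# A sketch of how a call() method like this is typically used: wrap it in a class that
# implements java.util.concurrent.Callable so several images can be opened in parallel.
# The ImageOpener class name and the file paths below are hypothetical, not from the source.
from java.util.concurrent import Callable, Executors
from ij import IJ

class ImageOpener(Callable):
    def __init__(self, filepath):
        self.filepath = filepath  # path of the image this task will open

    def call(self):
        return IJ.openImage(self.filepath)

executor = Executors.newFixedThreadPool(4)
futures = [executor.submit(ImageOpener(p)) for p in ["/data/a.tif", "/data/b.tif"]]
images = [f.get() for f in futures]  # get() blocks until each image is loaded
executor.shutdown()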
def writeITAsForAllTiffsInDirectory(directory,percentageThickness,orderIndependent): #precalculate thickness stuff thicknessFile = open("../binary/thickness/all-thickness-measurements.csv",'rb') thicknessReader = csv.reader(thicknessFile, delimiter=',') thicknessReader.next() thicknessNames = [] thicknessMeasurements = [] for thicknessTuple in thicknessReader: print(thicknessTuple[1],floor(float(thicknessTuple[2]))) thicknessNames.append(thicknessTuple[1]) thicknessMeasurements.append(floor(float(thicknessTuple[2]))) print(thicknessNames) #other setting up percentThicknessUsedAsCutoff = str(int(percentageThickness*100)) edgeCoordsFilePrefix = '../edge-coordinates/edge-coordinates-percThick-'+percentThicknessUsedAsCutoff+'-useClusters-'+str(orderIndependent) anglesFilePrefix = '../angles/angles-percThick-'+percentThicknessUsedAsCutoff+'-useClusters-'+str(orderIndependent) percentageFileName = '../perc-output/percentages-percThick'+percentThicknessUsedAsCutoff+'-useClusters-'+str(orderIndependent)+'.csv' open(percentageFileName, 'w').close() #delete contents from previous run counter=0 for fileName in os.listdir(directory): if fileName.endswith(".tif"): #get trabecular thickness print(fileName) thicknessName = fileName.replace("_skeleton.tif",".tif") print(thicknessName) currentThickness = thicknessMeasurements[[i for i, t in enumerate(thicknessNames) if t==thicknessName][0]] print(currentThickness) counter=counter+1 #execute ITA IJ.run("Clear BoneJ results"); currentImageName = os.path.join(directory, fileName) currentImage = IJ.openImage(currentImageName) IJ.run(currentImage, "Set Scale...", "distance=0 known=0 pixel=1 unit=pixel"); print(currentImage) print(percentageThickness) print(currentThickness) print(round(float(percentageThickness*currentThickness))) wrapper = cs.run("org.bonej.wrapperPlugins.IntertrabecularAngleWrapper", True, ["inputImage",currentImage, "minimumValence", 3, "maximumValence", 50, "minimumTrabecularLength", round(float(percentageThickness*currentThickness)),"marginCutOff",round(float(percentageThickness*currentThickness)),"iteratePruning", True, "useClusters", orderIndependent, "printCentroids", True,"printCulledEdgePercentages", True]) currentImage.close() wrapperInstance = wrapper.get() #save angles currentAnglesList = wrapperInstance.getOutput("anglesTable") anglesFile = open(anglesFilePrefix+'-'+os.path.splitext(fileName)[0]+'.csv',"w") anglesWriter = csv.writer(anglesFile, delimiter=',') if currentAnglesList: #"is not empty" for i,angles in enumerate(currentAnglesList): if(i>0): print([int(currentAnglesList.getColumnHeader(i))]) if(int(currentAnglesList.getColumnHeader(i))<20): anglesWriter.writerow([int(currentAnglesList.getColumnHeader(i))]+angles) else: anglesWriter.writerow([int(currentAnglesList.getColumnHeader(i))]+angles) anglesWriter.writerow(['Koosh ball alert: there is a node with valence '+currentAnglesList.getColumnHeader(i)]) currentAnglesList.setRowCount(0) break currentAnglesList.setRowCount(0) anglesFile.close() #save edge coordinates currentEdgeList = wrapperInstance.getOutput("centroidTable") edgeFile = open(edgeCoordsFilePrefix+os.path.splitext(fileName)[0]+'.csv',"w") edgeWriter = csv.writer(edgeFile, delimiter=',') if currentEdgeList: for i in range(len(currentEdgeList[0])): edgeRow = [currentEdgeList[0][i],currentEdgeList[1][i],currentEdgeList[2][i],currentEdgeList[3][i],currentEdgeList[4][i],currentEdgeList[5][i]] edgeWriter.writerow(edgeRow) edgeFile.close() #save percentages currentPercentageList = 
wrapperInstance.getOutput("culledEdgePercentagesTable") percentageFile = open(percentageFileName, "a") percentageWriter = csv.writer(percentageFile, delimiter=',') percentageRow = [item for sublist in currentPercentageList for item in sublist] percentageRow.insert(0, fileName) percentageWriter.writerow(percentageRow) percentageFile.close() continue else: continue
])[0]  # should I make 2 projects? One for rigid, one for warped?

exportedEMFolder = fc.findFoldersFromTags(MagCFolder, ['export_alignedEMForRegistration'])[0]
nLayers = len(os.listdir(exportedEMFolder))
registrationFolder = os.path.join(os.path.dirname(projectPath), 'LMEMRegistration')

for idChannel, channel in enumerate(channels):
    affineCroppedFolder = os.path.join(LMFolder, 'affineCropped_' + channel)

    # the dimensions of the first affineCropped image determine the size of the layerset
    # of the TrakEM project (and of the export)
    firstImagePath = os.path.join(affineCroppedFolder, os.walk(affineCroppedFolder).next()[2][0])
    im0 = IJ.openImage(firstImagePath)
    width0 = im0.getWidth()
    height0 = im0.getHeight()
    im0.close()

    roiExport = Rectangle(0, 0, width0, height0)

    projectPath = os.path.join(EMFolder, 'LMProject_' + channel + '.xml')
    p, loader, layerset, nLayers = fc.getProjectUtils(fc.initTrakem(LMFolder, nLayers))
    p.saveAs(projectPath, True)
    layerset.setDimensions(0, 0, width0, height0)

    for l, layer in enumerate(layerset.getLayers()):
        layerFolder = os.path.join(registrationFolder, 'layer_' + str(l).zfill(4))
def create_registered_hyperstack(imp, target_folder, channel):
    """ Takes the imp, which contains a virtual hyperstack, determines the x,y,z drift
    for each pair of time points using the given preferred channel, and outputs one
    image per slice into the target folder."""
    shifts = compute_frame_translations(imp, channel)
    # Make shifts relative to 0,0,0 of the original imp:
    shifts = concatenate_shifts(shifts)
    print "shifts concatenated:"
    for s in shifts:
        print s.x, s.y, s.z
    # Compute bounds of the new volume,
    # which accounts for all translations:
    minx, miny, minz, maxx, maxy, maxz = compute_min_max(shifts)
    # Make shifts relative to new canvas dimensions
    # so that the min values become 0,0,0
    for shift in shifts:
        shift.x -= minx
        shift.y -= miny
        shift.z -= minz
    print "shifts relative to new dimensions:"
    for s in shifts:
        print s.x, s.y, s.z
    # new canvas dimensions:
    width = imp.width + maxx - minx
    height = maxy - miny + imp.height
    slices = maxz - minz + imp.getNSlices()
    print "New dimensions:", width, height, slices
    # Count number of digits of each dimension, to output zero-padded numbers:
    slice_digits = len(str(slices))
    frame_digits = len(str(imp.getNFrames()))
    channel_digits = len(str(imp.getNChannels()))
    # List to accumulate all created names:
    names = []
    # Prepare empty slice to pad in Z when necessary
    empty = imp.getProcessor().createProcessor(width, height)
    # if it's RGB, fill the empty slice with blackness
    if isinstance(empty, ColorProcessor):
        empty.setValue(0)
        empty.fill()
    # Write all slices to files:
    stack = imp.getStack()
    for frame in range(1, imp.getNFrames() + 1):
        shift = shifts[frame - 1]
        fr = "t" + zero_pad(frame, frame_digits)
        # Pad with empty slices before reaching the first slice
        for s in range(shift.z):
            ss = "_z" + zero_pad(s + 1, slice_digits)  # slices start at 1
            for ch in range(1, imp.getNChannels() + 1):
                name = fr + ss + "_c" + zero_pad(ch, channel_digits) + ".tif"
                names.append(name)
                FileSaver(ImagePlus("", empty)).saveAsTiff(target_folder + "/" + name)
        # Add all proper slices
        for s in range(1, imp.getNSlices() + 1):
            ss = "_z" + zero_pad(s + shift.z, slice_digits)
            for ch in range(1, imp.getNChannels() + 1):
                ip = stack.getProcessor(imp.getStackIndex(ch, s, frame))
                ip2 = ip.createProcessor(width, height)  # potentially larger
                ip2.insert(ip, shift.x, shift.y)
                name = fr + ss + "_c" + zero_pad(ch, channel_digits) + ".tif"
                names.append(name)
                FileSaver(ImagePlus("", ip2)).saveAsTiff(target_folder + "/" + name)
        # Pad the end
        for s in range(shift.z + imp.getNSlices(), slices):
            ss = "_z" + zero_pad(s + 1, slice_digits)
            for ch in range(1, imp.getNChannels() + 1):
                name = fr + ss + "_c" + zero_pad(ch, channel_digits) + ".tif"
                names.append(name)
                FileSaver(ImagePlus("", empty)).saveAsTiff(target_folder + "/" + name)
    # Create virtual hyperstack with the result
    vs = ImageStack(width, height, None)
    for name in names:
        vs.addSlice(IJ.openImage(target_folder + "/" + name).getProcessor())
    vs_imp = ImagePlus("registered time points", vs)
    vs_imp.setDimensions(imp.getNChannels(),
                         len(names) / (imp.getNChannels() * imp.getNFrames()),
                         imp.getNFrames())
    vs_imp.setOpenAsHyperStack(True)
    IJ.log("\nHyperstack dimensions: time frames:" + str(vs_imp.getNFrames()) +
           ", slices: " + str(vs_imp.getNSlices()) +
           ", channels: " + str(vs_imp.getNChannels()))
    if 1 == vs_imp.getNSlices():
        return vs_imp
    # Else, return as a composite
    mode = CompositeImage.COLOR
    if isinstance(imp, CompositeImage):
        mode = imp.getMode()
    else:
        return vs_imp
    return CompositeImage(vs_imp, mode)
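# Usage sketch for create_registered_hyperstack (the input path, output folder and the
# channel used for drift estimation are hypothetical placeholders).
imp = IJ.openImage("/data/timelapse_hyperstack.tif")
registered = create_registered_hyperstack(imp, "/data/registered_slices", 1)
registered.show()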
def openStack(filepath):
    if filepath.endswith(".klb"):
        return klb.readFull(filepath)
    else:
        return IJ.openImage(filepath)
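# Usage sketch for openStack: .klb files go through the KLB reader bound to the name
# "klb" elsewhere in this script, everything else through IJ.openImage. The paths are
# hypothetical placeholders.
stack_a = openStack("/data/timepoint_000.klb")
stack_b = openStack("/data/timepoint_000.tif")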
import fiji.plugin.trackmate.features.spot.SpotContrastAndSNRAnalyzerFactory as SpotContrastAndSNRAnalyzerFactory
import fiji.plugin.trackmate.action.ExportStatsToIJAction as ExportStatsToIJAction
import fiji.plugin.trackmate.io.TmXmlReader as TmXmlReader
import fiji.plugin.trackmate.action.ExportTracksToXML as ExportTracksToXML
import fiji.plugin.trackmate.io.TmXmlWriter as TmXmlWriter
import fiji.plugin.trackmate.features.ModelFeatureUpdater as ModelFeatureUpdater
import fiji.plugin.trackmate.features.SpotFeatureCalculator as SpotFeatureCalculator
import fiji.plugin.trackmate.features.spot.SpotContrastAndSNRAnalyzer as SpotContrastAndSNRAnalyzer
import fiji.plugin.trackmate.features.spot.SpotIntensityAnalyzerFactory as SpotIntensityAnalyzerFactory
import fiji.plugin.trackmate.features.track.TrackSpeedStatisticsAnalyzer as TrackSpeedStatisticsAnalyzer
import fiji.plugin.trackmate.features.track.TrackDurationAnalyzer as TrackDurationAnalyzer
import fiji.plugin.trackmate.util.TMUtils as TMUtils

# Get currently selected image
#imp = WindowManager.getCurrentImage()
imp = IJ.openImage('{target_file}')
IJ.run(
    imp, "Properties...",
    "channels=1 slices=1 frames=651 unit=pixel pixel_width=1.0000 pixel_height=1.0000 voxel_depth=1.0000")
#imp = IJ.openImage('/home/ubuntu/data/RED_nPEG_37C_pH72_S1_1_1_2.tif')
#imp = IJ.openImage('http://fiji.sc/samples/FakeTracks.tif')
#imp.show()

#-------------------------
# Instantiate model object
#-------------------------
model = Model()

# Set logger
#1. Open data and run stunted TrackMate to determine the auto quality number
#2. Set image properties
#3. Configure detector
#4. Configure spot filters
#5. Configure LAP tracker
#6. Run TrackMate
#7. Get spot or track features
#8. Save log as a parsable data set

#1.
# Opening data and determining the auto quality number
the_input = getArgument()
the_list = the_input.rpartition(" ")
image = the_list[0]
imp = IJ.openImage(image)
imp.show()
subtraction = float(the_list[2])

model = Model()
settings = Settings()
settings.setFrom(imp)
settings.detectorFactory = DogDetectorFactory()
settings.detectorSettings = {
    'DO_SUBPIXEL_LOCALIZATION': True,
    'RADIUS': 0.350,
    'TARGET_CHANNEL': 1,
    'THRESHOLD': 0.0,
    'DO_MEDIAN_FILTERING': True,
}
settings.addSpotAnalyzerFactory(SpotIntensityAnalyzerFactory())
    for ii in range(0, nColor):
        idx = Rnd.nextInt(len(cls))
        CL.append(cls[idx])
    return CL

p = Project.getProjects()[0]
p_tree_r = p.getRootProjectThing()
layerset = p.getRootLayerSet()
TempThing = p.getTemplateThing("neuron")
CL = DistinctColors(nROIsCount)

for k in range(1, nROIsCount + 1):
    strFn = strDir + "N" + str(k) + "_reg.tif"
    print("Processing file: " + strFn)
    imp = IJ.openImage(strFn)
    ali = AreaList(p, "N" + str(k), 0, 0)
    layerset.add(ali)
    pt = ProjectThing(TempThing, p, ali)
    p_tree_r.addChild(pt)
    stack = imp.getImageStack()
    for i in range(1, imp.getNSlices() + 1):
        ip = stack.getProcessor(i)  # 1-based
        m = AreaUtils.extractAreas(ip)
        # Report progress
        #print i, ":", len(m)
        # Get the Layer instance at the corresponding index
def analyse(cwd, user, imagefolder, stats, experiments, multi, Rloc2, subfoldernames, names, statsfolderPath, cwdR): """ Main image analysis Gets user image analysis settings from the .csv file. If multiple experiments have been selected by the user (multi) each subfolder will be looped through. A nested loop will then interate through each .tif image and analyse. A .csv file will be produced for each folder analysed with the name of each image and its % neurite density and % myelination. A summary csv file will also be produced with the average % neurite density and % myelination for each subfolder. If statistical analysis has been selected (stats) then MyelinJ's Rscript will be run via the command line. If multple experiments is not selected then all of the images within the selected folder will be analysed together and no summary .csv will be produced. Independ of the analysis settings defined, a processed myelin channel image and a processed neurite channel image will be saved. The images can be any number of subdirectories (folders within folders). Parameters ---------- cwd : string Path for current working directory (location of MyelinJ folder in Fiji). user: string User name imagefolder: string Path to .tiff image folder(s) defined by user. stats: boolean Perform statistical analysing using R? experiments: 2D list of strings list of all the subfolders (experiments) that are in each experimental condition. multi: boolean Analyse multiple experiments? Rloc2: string file path to Rscript location subfoldernames: string name of each subfolder which denoates each individual experiment, if multple experiments are being analysed. names: array array of textfields for each experimental condition defined by user. User will enter the name of each experimental condition. statsfolderPath: string file path to the create statsfolder. cwdR: string file path to MyelinJstats.R """ # read settings from the user name CSV bg = False readsettings = [] imagenames = [] neuritedensity = [] myelinoverlay = [] myelinaverage2 = [] neuriteaverage2 = [] root = cwd filename = user fullpath = os.path.join(root, filename) f = open(fullpath, 'rb') readCSV = csv.reader(f) for row in readCSV: readsettings.append(row[0]) readsettings.append(row[1]) readsettings.append(row[2]) readsettings.append(row[3]) readsettings.append(row[4]) readsettings.append(row[5]) readsettings.append(row[6]) f.close() i = 0 for i in range(len(subfoldernames)): # if multiple experimental conditions has been selected each folder is treated as a # separate experiment and looped through separately otherwise all folders will be # treated as one experiment this only works for sub directories within the main folder. # Further folders will be ignored (each image can be in its own folder for example) if multi is True: # if multiple experiments are being analysed the file path is changed to the # current subfolder settings2 = os.path.join(imagefolder, subfoldernames[i]) if "Windows" in OS: settings2 = settings2 + "\\" elif "Mac" in OS: settings2 = settings2 + "/" else: settings2 = imagefolder # loop through all .tiff files in location for root, dirs, files in os.walk(settings2): for name in files: if name.endswith((".tif")): imagenames.append(os.path.join(name)) # open .tiff image, split channels and # convert to 8bit grey scale. 
imp = IJ.openImage(os.path.join(root, name)) g = int(readsettings[4]) r = int(readsettings[5]) imp = ChannelSplitter.split(imp) green = imp[g] red = imp[r] conv = ImageConverter(red) conv.convertToGray8() conv = ImageConverter(green) conv.convertToGray8() # thresholding to select cell bodies green2 = green.duplicate() if (readsettings[0] != "0") or (readsettings[1] != "0"): bg = True IJ.setAutoThreshold(green2, readsettings[2]) IJ.setRawThreshold(green2, int(readsettings[0]), int(readsettings[1]), None) Prefs.blackBackground = True IJ.run(green2, "Convert to Mask", "") IJ.run(green2, "Invert LUT", "") if readsettings[7] != "0": IJ.run(green2, "Make Binary", "") IJ.run( green2, "Remove Outliers...", "radius=" + readsettings[7] + " threshold=50 which=Dark") # CLAHE and background subtraction if readsettings[8] == "True": mpicbg.ij.clahe.Flat.getFastInstance().run( green, 127, 256, 3, None, False) if readsettings[9] == "True": calc = ImageCalculator() green = calc.run("Subtract create", green, red) elif readsettings[6] == "True": IJ.run(green, "Subtract Background...", "rolling=50") if readsettings[10] != "0": IJ.run(green, "Subtract...", "value=" + readsettings[10]) # run frangi vesselness pixelwidth = str(green.getCalibration().pixelWidth) IJ.run( green, "Frangi Vesselness (imglib, experimental)", "number=1 minimum=" + pixelwidth + " maximum=" + pixelwidth) green = IJ.getImage() # convert frangi vesselness image to 8bit grey scale conv = ImageConverter(green) conv.convertToGray8() IJ.run(green, "Convert to Mask", "") # remove cell bodies if bg is True: green = ImageCalculator().run("Subtract create", green, green2) # run grey scale morphology filter from MorpholibJ if readsettings[11] != "0": green = green.getProcessor() algo = BoxDiagonalOpeningQueue() algo.setConnectivity(4) result = algo.process(green, int(readsettings[11])) green = ImagePlus("result", result) IJ.run(green, "Invert LUT", "") if len(readsettings) > 14: # sparse neurite image analysis if readsettings[15] == "True": IJ.run( red, "Enhance Local Contrast (CLAHE)", "blocksize=127 histogram=256 maximum=3 mask=*None* fast_(less_accurate)" ) if readsettings[14] == "True": IJ.run(red, "Subtract Background...", "rolling=50") IJ.setAutoThreshold(red, readsettings[16]) IJ.setRawThreshold(red, int(readsettings[17]), int(readsettings[18]), None) IJ.run(red, "Convert to Mask", "") IJ.run(red, "Invert LUT", "") else: # dense neurite image analysis IJ.run( red, "Normalize Local Contrast", "block_radius_x=40 block_radius_y=40 standard_deviations=" + readsettings[12] + " center stretch") IJ.run(red, "Auto Threshold", "method=Default white") IJ.run(red, "Invert LUT", "") if readsettings[3] == "True": IJ.run(red, "Despeckle", "") IJ.saveAs(red, "Jpeg", settings2 + name + "neurites") # get number of neurite pixels # get number of neurite pixels statsneurite = red.getProcessor() statsneurite = statsneurite.getHistogram() neuritedensity.append(statsneurite[255]) IJ.saveAs(green, "Jpeg", settings2 + name + "myelinFinal") # get number of myelin pixels statsmyelin = green.getProcessor() statsmyelin = statsmyelin.getHistogram() myelinoverlay.append(statsmyelin[255]) closeallimages() # get pixel total of image whitepixels = (statsneurite[0]) blackpixels = (statsneurite[255]) totalpixels = whitepixels + blackpixels totalpixels = [totalpixels] * len(neuritedensity) # for each image calculate % myelination as number of myelin pixels # divided by the number of neurite pixels * 100 myelinoverlay = [ x1 / x2 * 100 for (x1, x2) in zip(myelinoverlay, 
neuritedensity) ] myelinaverage = sum(myelinoverlay) / len(myelinoverlay) myelinaverage2.append(myelinaverage) # for each image calculate % neurite density as neurite pixels divided # by the total number of pixels in the image * 100. neuritedensity = [ x1 / x2 * 100 for (x1, x2) in zip(neuritedensity, totalpixels) ] neuriteaverage = sum(neuritedensity) / len(neuritedensity) neuriteaverage2.append(neuriteaverage) name = "Image names" green = "% myelination" red = "% neurite density" imagenames = [name] + imagenames neuritedensity = [red] + neuritedensity myelinoverlay = [green] + myelinoverlay result = [] result.append(imagenames) result.append(neuritedensity) result.append(myelinoverlay) root = settings2 filename = "Results.csv" fullpath = os.path.join(root, filename) f = open(fullpath, 'wb') writer = csv.writer(f) for d in range(len(result)): row = [result[d]] writer.writerows(row) f.close() # must be reset to 0 for each iteration. y = 0 r = 0 # if statistical analysis is being performed the results .csv file # is also saved to a subfolder within the statistical analysis folder # which denotes the experimental condition the results belong to. if stats is True: # nested for loop to identify correct experimental condition # for the current subfolder being analysed. for y in range(0, len(experiments)): for r in range(0, len(experiments[0])): if experiments[y][r] == subfoldernames[i]: if "Windows" in OS: root = imagefolder + "\\statistical analysis\\" + names[ y].getText() elif "Mac" in OS: root = imagefolder + "/statistical analysis/" + names[ y].getText() filename = subfoldernames[i] + ".csv" fullpath = os.path.join(root, filename) f = open(fullpath, 'wb') writer = csv.writer(f) for e in range(len(result)): row = [result[e]] writer.writerows(row) f.close() break cwd2 = os.getcwd() for files in os.listdir(cwd2): if files.endswith(".csv"): os.remove(os.path.join(cwd2, files)) imagenames = [] myelinoverlay = [] neuritedensity = [] # create .csv summary sheet with average % neurite density # and average % myelination for each subfolder (experiment). if multi is True: name = "Folder name" imagenames = [name] + subfoldernames neuritedensity = [red] + neuriteaverage2 myelinoverlay = [green] + myelinaverage2 result = [] result.append(imagenames) result.append(neuritedensity) result.append(myelinoverlay) if "Windows" in OS: root = imagefolder + "\\" elif "Mac" in OS: root = imagefolder + "/" filename = "Result-Summary.csv" fullpath = os.path.join(root, filename) f = open(fullpath, 'wb') writer = csv.writer(f) for p in range(len(result)): row = [result[p]] writer.writerows(row) f.close() imagenames = [] myelinoverlay = [] neuritedensity = [] # Run Rscript for statistical analysis via the command line if stats is True: cmd = Rloc2 + " " + cwdR + " " + statsfolderPath Runtime.getRuntime().exec(cmd) Finished()
def cellSegmentation(srcDir, dstDir, currentDir, filename, keepDirectories): print "Processing:" # Opening the image print "Open image file", filename imp = IJ.openImage(os.path.join(currentDir, dstDir)) # Put your processing commands here! localinput=srcDir.replace("/", "\\") saveDir = localinput.replace(srcDir, dstDir) string="." dotIndex=filename.find(string) localfile= filename[0:dotIndex] print(localfile) IJ.run("New... ", "name="+f+" type=Table") print(f,"\\Headings:Cell\tarea\tCirc\tAR\tRoundness\tMaximum") IJ.run("Bio-Formats", "open=[" + localinput + os.path.sep + filename +"] autoscale color_mode=Default rois_import=[ROI manager] view=Hyperstack stack_order=XYCZT") IJ.open() idd= WM.getIDList(); imageID= idd[0]; IJ.run("Clear Results") WM.getImage(imageID) IJ.run("Duplicate...", "duplicate channels="+str(x)+"") #Nucleus channel #took away x IJ.run("Z Project...", "projection=[Standard Deviation]");#picture for frame detection IJ.run("8-bit"); IJ.run("Duplicate...", "title=IMAGE");#frame IJ.run("Duplicate...", "title=SUBTRACT");#Background subtraction mask (for frame and watershed) imp=IJ.getImage() pixelWidth=imp.getWidth() pixelWidth=pixelWidth/1647.89 pixelHeight= imp.getHeight() #create subtraction mask, applying constraining maximum (step I) IJ.selectWindow("SUBTRACT") nResults=imp.getStatistics() row = nResults rt_exist = WM.getWindow("Results") if rt_exist==None: rt= ResultsTable() else: rt = rt_exist.getTextPanel().getOrCreateResultsTable() rt.setValue("Max ", 0, row.max) #text file rt.show("Results") u=math.floor(row.mean*3) IJ.run("Max...","value="+str(u)) #constraining maximum of 3-fold mean to reduce effect of extreme values during subtraction #gaussian blurring (step II) IJ.run("Gaussian Blur...", "sigma=100 scaled") #blurring for subtraction mask IJ.selectWindow("IMAGE") pxrollrad = cellradius/pixelWidth; #rolling ball radius in pixels needed (= predefined cell radius[µm]/pixelsize[µm/px]) IJ.run("Subtract Background...", "rolling="+str(pxrollrad)+"") IJ.run("Gaussian Blur...", "sigma=2 scaled") #reduces punctate character of grayscale image ' IM=IJ.selectWindow("IMAGE") SUB=IJ.selectWindow("SUBTRACT") ic().run("SUBTRACT", IM, SUB) #just subtracts two images IJ.selectWindow("IMAGE") #see how to call IJ.run("Duplicate...", "title=AND")#watershed IJ.run("Duplicate...", "title=CHECK")#for checking if maxima exist within selection later #Apply threshold to get binary image of cell borders (step IV) IJ.selectWindow("IMAGE") imp = IJ.getImage() # the current image imp.getProcessor().setThreshold(1, 255, ImageProcessor.NO_LUT_UPDATE) IJ.run("Subtract Background...","...") IJ.run("Convert to Mask", "method=Default background=Dark only black") IJ.run("Fill Holes") #Create watershed line image (step V) IJ.selectWindow("AND") IJ.run("Gaussian Blur...", "sigma=2 scaled") imp=IJ.getImage() pixelWidth=imp.getWidth() pixelWidth=pixelWidth/1647.89 pixelHeight= imp.getHeight() # Saving the image nResults=imp.getStatistics() row = nResults rt.setValue("Max ", 1, row.max) #text file nBins = 256 Hist = HistogramWindow("Histogram",imp,nBins) Table = Hist.getResultsTable() Counts = Table.getColumn(1) #mean gray value of pixels belonging to cells needed (i.e. mean of ONLY non-zero pixel) Sum = 0 #all counts CV = 0 #weighed counts (= counts * intensity) for i in range(0, len(Counts)): #starting with 1 instead of 0. -> 0 intensity values are not considered. 
Sum += Counts[i] CV += Counts[i]*i m = (CV/Sum) m=math.floor(m) l = math.floor(2*m) #Maxima need to be at least twice the intensity of cellular mean intensity IJ.run("Find Maxima...", "noise="+str(l)+" output=[Segmented Particles] exclude") #watershedding #Combine watershed lines and cell frame (step VI) IJ.selectWindow("IMAGE") imp=IJ.getImage() imp.getProcessor().setThreshold(1, 255, ImageProcessor.NO_LUT_UPDATE) IJ.run(imp, "Watershed", "") #useful imp = IJ.getImage() ip = imp.getProcessor() segip = MaximumFinder().findMaxima( ip, 1, ImageProcessor.NO_THRESHOLD, MaximumFinder.SEGMENTED , False, False) segip.invert() segimp = ImagePlus("seg", segip) segimp.show() mergeimp = RGBStackMerge.mergeChannels(array([segimp, None, None, imp, None, None, None], ImagePlus), True) mergeimp.show() pa_exist = WM.getWindow("Results for PA") if pa_exist==None: pa_rt= ResultsTable() else: pa_rt = pa_exist.getTextPanel().getOrCreateResultsTable() ParticleAnalyzer.setResultsTable(pa_rt) IJ.run("Set Measurements...", "area mean perimeter shape decimal=3") IJ.run("Analyze Particles...", "size=" + str(cellradius) + "-Infinity circularity=0.1-1.00 add"); #Cell bodies detected pa_rt.show("Results for PA ") save_all(srcDir, dstDir, filename, localfile, keepDirectories, imageID)
import sys from ij import IJ # Fiji Jython interpreter implements Python 2.5 which does not # provide support for argparse. error_log = sys.argv[ -8 ] input = sys.argv[ -7 ] iterations = int( sys.argv[ -6 ] ) count = int( sys.argv[ -5 ] ) black_background = jython_utils.asbool( sys.argv[ -4 ] ) pad_edges_when_eroding = jython_utils.asbool( sys.argv[ -3 ] ) tmp_output_path = sys.argv[ -2 ] output_datatype = sys.argv[ -1 ] # Open the input image file. input_image_plus = IJ.openImage( input ) # Create a copy of the image. input_image_plus_copy = input_image_plus.duplicate() image_processor_copy = input_image_plus_copy.getProcessor() try: # Set binary options. options = jython_utils.get_binary_options( black_background=black_background, iterations=iterations, count=count, pad_edges_when_eroding=pad_edges_when_eroding ) IJ.run( input_image_plus_copy, "Options...", options ) # Run the command. IJ.run( input_image_plus_copy, "Make Binary", "" )
lowTH = Auto_Threshold.Default(hist) imp.getProcessor().threshold(lowTH) imp.setDisplayRange(0, lowTH + 1) ImageConverter.setDoScaling(bScale) IJ.run(imp, "8-bit", "") imp.setDisplayRange(0, 255) imp.setTitle(ti + "-bin-" + str_thresh) return ([imp, lowTH]) IJ.run("Close All") git_home = os.getenv('GIT_HOME') rel_dir = "/tips/ImageJ/tif/" path = git_home + rel_dir print(path) img_path = path + "Nuclei.tif" imp_ori = IJ.openImage(img_path) imp_ori.show() # note: it works from jmFijiGen.py! ret = jmg.auto_threshold(imp_ori, "Otsu", bScale=False) IJ.run(ret[0], "Apply LUT", "") # ret[0].setDisplayRange(0, ret[1]+1) ret[0].show() print(ret[1])
""" test_channel_splitter.py Adapted by J. R. Minter from "Channel Merge" here: http://wiki.cmci.info/documents/120206pyip_cooking/python_imagej_cookbook Date Who What ---------- --- ---------------------------------------------------- 2019-06-16 JRM Initial adaptation. Use the HOME environment variable to get the start of the path """ from ij import IJ from ij.plugin import ChannelSplitter import os home_dir = os.getenv("HOME") imp = IJ.openImage(home_dir + "/Documents/git/tips/ImageJ/tif/mandrill_256.tif") imps = ChannelSplitter.split(imp) imps[0].show() # Channel 1 imps[1].show() # Channel 2 imps[2].show() # Channel 3
def run(): print "===== bBatchConvertTo8Bitv3 =====" # Expecting one argument: the file path if len(sys.argv) < 2: print " We need at least one folder as input" print " Usage: ./fiji-macosx bBatchConvertTo8Bitv3 <folder-path>/" # Prompt user for a folder sourceFolder = DirectoryChooser( "Please Choose A Directory Of .tif Files").getDirectory() if not sourceFolder: return else: sourceFolder = sys.argv[1] #assuming it ends in '/' #get user options okGo = getOptions() # creates {numberOfChannels, replaceExisting} if okGo == -1: return 0 destFolder = sourceFolder + "channels8_256/" #make destination directory if not os.path.isdir(destFolder): os.makedirs(destFolder) print " Processing source folder: ", sourceFolder print " Saving to destination folder: ", destFolder numOpened = 0 numSaved = 0 for filename in os.listdir(sourceFolder): startWithDot = filename.startswith(".") isMax = filename.endswith("max.tif") isTif = filename.endswith(".tif") if (not startWithDot) and (not isMax) and (isTif): shortName, fileExtension = os.path.splitext(filename) outPath = destFolder + "/" + filename outPath1 = destFolder + "/" + shortName + "_ch1" + ".tif" outPath2 = destFolder + "/" + shortName + "_ch2" + ".tif" #before we open, check if eventual dest exists if not replaceExisting: if numberOfChannels == 2 and os.path.exists( outPath1) and os.path.exists(outPath2): print " 512 Destination file exists, not saving the image.", filename continue #with next iteration if numberOfChannels == 1 and os.path.exists(outPath): print " 512 Destination file exists, not saving the image.", filename continue #with next iteration print " ====================================" print " -> Opening", sourceFolder + filename imp = IJ.openImage(sourceFolder + filename) if imp is None: print " Could not open image from file:", filename continue #with next iteration imp.show() numOpened += 1 #i can get properties as long list of {key=value} #how do i then set each property in new imp1/imp2? Do IJ.openImagehave ot loop? #print imp.getProperties() #in the future IJ.openImagehavewant to have option to scale down to 512X512 #run("Scale...", "x=- y=- z=1.0 width=512 height=512 depth=196 interpolation=Bilinear average process create title=20131007_a144_008_ch1-1.tif"); print " Image is: " + str(imp.width) + " X " + str( imp.height) + " X " + str(imp.getNSlices()) #if imp.getBitDepth() == 16: if imp.width > 512 and imp.height > 512: print " Converting to 512X512 with 'Scale'" #IJ.run("8-bit") theTitle = "tmpOutput" IJ.run( imp, "Scale...", "x=- y=- z=1.0 width=512 height=512 depth=" + str(imp.getNSlices()) + " interpolate=Bilinear average process create title=" + theTitle) imp = IJ.getImage() #bug: original window is left open if numberOfChannels == 2: print " deinterleaving" IJ.run("Deinterleave", "how=2") #makes 2 window #ch2 imp2 = IJ.getImage() fs = FileSaver(imp2) print " Saving 8bit File to", outPath2 fs.saveAsTiffStack(outPath2) numSaved += 1 imp2.changes = 0 imp2.close() #ch1 imp1 = IJ.getImage() fs = FileSaver(imp1) print " Saving 8bit File to", outPath2 fs.saveAsTiffStack(outPath1) numSaved += 1 imp1.changes = 0 imp1.close() elif numberOfChannels == 1: #single channel fs = FileSaver(imp) print " Saving 8bit File to", outPath fs.saveAsTiffStack(outPath) numSaved += 1 imp.changes = 0 imp.close() else: print " File was not larger than 512X512???" 
imp.close() #close original else: if isTif: #print " ====================================" print " -> Ignoring .tif:", filename print " ===" print " bBatchConvertTo8Bitv3.py is Done, Number Opened " + str( numOpened) + ", Number Saved ", str(numSaved) print " ==="
from ij import IJ from ij.plugin import ImageCalculator IJ.run("Close All") path_tif = "/Users/jrminter/Documents/git/tips/ImageJ/tif/lena-eyes.tif" path_jpg = "/Users/jrminter/Documents/git/tips/ImageJ/jpg/lena-eyes.jpg" lena_tif = IJ.openImage(path_tif) lena_tif.show() IJ.saveAs(lena_tif, "Jpeg", path_jpg) for i in range(201): lena_jpg = IJ.openImage(path_jpg) IJ.saveAs(lena_jpg, "Jpeg", path_jpg) IJ.run(lena_jpg, "32-bit", "") IJ.run(lena_tif, "32-bit", "") lena_tif.show() lena_jpg.show() ic = ImageCalculator() imp_sub = ic.run("Subtract create 32-bit", lena_jpg, lena_tif) IJ.run(imp_sub, "Enhance Contrast", "saturated=0.35") IJ.run(imp_sub, "8-bit", "") imp_sub.show()
os.mkdir (ISanalysisfolder + "%04d" % i + "/aligned/conv_merged/out/") os.mkdir (ISanalysisfolder + "%04d" % i + "/aligned/conv_merged/mc/") #os.mkdir (ISanalysisfolder + "%04d" % i + "/aligned/storm_561/") #os.mkdir (ISanalysisfolder + "%04d" % i + "/aligned/storm_561/out/") #os.mkdir (ISanalysisfolder + "%04d" % i + "/aligned/storm_561/mc/") for j in range (1, (xdim+1)): os.mkdir (ISanalysisfolder + "%04d" % i + "/aligned/conv_561/c_" + "%01d" % j + "/") os.mkdir (ISanalysisfolder + "%04d" % i + "/aligned/conv_561adj/c_" + "%01d" % j + "/") os.mkdir (ISanalysisfolder + "%04d" % i + "/aligned/conv_merged/c_" + "%01d" % j + "/") #os.mkdir (ISanalysisfolder + "%04d" % i + "/aligned/storm_561/c_" + "%01d" % j + "/") os.mkdir (ISanalysisfolder + "%04d" % i + "/aligned/storm_merged/c_" + "%01d" % j + "/") ## this section merges 2 storm images and uses the average for j in range (0, conv_images_per_section): k= str(j) if os.path.isfile((ISanalysisfolder + "%04d" % i + "/aligned/after_dist_corr/488storm_" + "%03d" % i + "_" + "%02d" % j + "_0.tif")): imp = IJ.openImage((ISanalysisfolder + "%04d" % i + "/aligned/after_dist_corr/488storm_" + "%03d" % i + "_" + "%02d" % j + "_0.tif")) imp.show() IJ.run(imp, "Canvas Size...", "width=2350 height=2350 position=Center zero"); IJ.run(imp, "Canvas Size...", "width=2560 height=2560 position=Center zero"); #imp = IJ.openImage((ISanalysisfolder + "%04d" % i + "/aligned/after_dist_corr/488storm_" + "%03d" % i + "_" + "%02d" % j + "_1.tif")) imp = IJ.openImage((ISanalysisfolder + "%04d" % i + "/aligned/after_dist_corr/488storm_" + "%03d" % i + "_" + "%02d" % j + "_0.tif")) imp.show() IJ.run(imp, "Canvas Size...", "width=2350 height=2350 position=Center zero"); IJ.run(imp, "Canvas Size...", "width=2560 height=2560 position=Center zero"); IJ.run(imp, "Images to Stack", "name=Stack title=[] use"); imp = IJ.getImage() IJ.run(imp, "Z Project...", "start=1 stop=2 projection=[Average Intensity]"); imp = IJ.getImage() else: imp = IJ.createImage("Untitled", "8-bit Black", 2560, 2560, 1); IJ.saveAs(imp, "Tiff", (ISanalysisfolder + "%04d" % i + "/aligned/488storm_" + "%03d" % i + "_" + "%02d" % j + ".tif"));
from ij import IJ import struct """ from: http://wiki.cmci.info/documents/120206pyip_cooking/python_imagej_cookbook """ def s2u8bit(v): return struct.unpack("B", struct.pack("b", v))[0] imp = IJ.openImage("http://imagej.nih.gov/ij/images/blobs.gif") # imp = IJ.getImage() signedpix = imp.getProcessor().getPixels() pix = map(s2u8bit, signedpix) #check that the conversion worked. # this example was made for binary image, to print only values 255 for j in range(len(pix)): curval = pix[j] #curval = s2u8bit(curval) if curval is 0: print '--' else: print curval
from ij.io import OpenDialog import os from ij.measure import ResultsTable IJ.run("Clear Results") #clear results od = OpenDialog("Choose a file", None) filename = od.getFileName() directory = od.getDirectory() path = od.getPath() print filename print directory print path imp = IJ.openImage(path) imp.show() #imp = IJ.getImage() IJ.run("8-bit") IJ.run("Invert") #use this line if we need to invert the image IJ.run("Set Measurements...", "area min redirect=None decimal=3") IJ.run("Analyze Particles...", "display") #save the results table as a CSV #https://www.ini.uzh.ch/~acardona/fiji-tutorial/#s2 save_string = directory + 'Results_random_circles_CoronaTime.csv' IJ.saveAs("Results", save_string) #table = ResultsTable.getResultsTable()
def processImages(cfg, wellName, wellPath, c, imgFiles): points = [] chanStr = 'ch%(channel)02d' % { "channel": c } chanName = cfg.getValue(ELMConfig.chanLabel)[c] if (chanName == ELMConfig.RED): chanPixBand = 0 elif (chanName == ELMConfig.GREEN): chanPixBand = 1 elif (chanName == ELMConfig.BLUE): chanPixBand = 2 elif (chanName == ELMConfig.YELLOW): chanPixBand = 0 else: chanPixBand = -1 chanPixBand numExclusionPts = 0 numColorThreshPts = 0 ptCount = 0 print "\tProcessing channel: " + chanName for t in range(cfg.getValue(ELMConfig.numT)): tStr = cfg.getTStr(t) for z in range(0, cfg.getValue(ELMConfig.numZ)): zStr = cfg.getZStr(z) currIP = IJ.openImage(imgFiles[z][t][0]) origImage = currIP.duplicate() if cfg.getValue(ELMConfig.debugOutput): WindowManager.setTempCurrentImage(currIP) IJ.saveAs( 'png', os.path.join( wellPath, "Orig_" + wellName + "_" + zStr + "_" + chanStr + ".png")) # We need to get to a grayscale image, which will be done differently for different channels dbgOutDesc = wellName + "_" + tStr + "_" + zStr + "_" + chanStr currIP = ELMImageUtils.getThresholdedMask(currIP, c, z, 1, chanName, cfg, wellPath, dbgOutDesc) if (not currIP): continue currProcessor = currIP.getProcessor() #WindowManager.setTempCurrentImage(currIP); #currIP.show() imgWidth = currIP.getWidth() imgHeight = currIP.getHeight() for x in range(0, imgWidth): for y in range(0, imgHeight): if not currProcessor.get(x, y) == 0x00000000: ptCount += 1 ptX = x * cfg.getValue(ELMConfig.pixelWidth) ptY = y * cfg.getValue(ELMConfig.pixelHeight) ptZ = -z * cfg.getValue(ELMConfig.pixelDepth) colorPix = origImage.getPixel(x, y) red = colorPix[0] green = colorPix[1] blue = colorPix[2] # Check that point meets color threshold aboveColorThresh = not cfg.hasValue(ELMConfig.pcloudColorThresh) \ or colorPix[chanPixBand] > cfg.getValue(ELMConfig.pcloudColorThresh) # Check that point isn't in exclusion zone outsideExclusion = not (cfg.hasValue(ELMConfig.pcloudExclusionX) and cfg.hasValue(ELMConfig.pcloudExclusionY)) \ or (x < cfg.getValue(ELMConfig.pcloudExclusionX) or y < cfg.getValue(ELMConfig.pcloudExclusionY)) if (aboveColorThresh and outsideExclusion): points.append([ptX, ptY, ptZ, red, green, blue]) elif (not aboveColorThresh): numColorThreshPts += 1 elif (not outsideExclusion): numExclusionPts += 1 currIP.close() origImage.close() print "\t\tTotal points considered: " + str(ptCount) print "\t\tColor Threshold Skipped " + str( numColorThreshPts) + " points." print "\t\tExclusion Zone Skipped " + str(numExclusionPts) + " points." numPoints = len(points) cloudName = chanName + "_" + tStr + "_cloud.ply" resultsFile = open(os.path.join(wellPath, cloudName), "w") resultsFile.write("ply\n") resultsFile.write("format ascii 1.0\n") resultsFile.write("element vertex " + str(numPoints) + "\n") resultsFile.write("property float x\n") resultsFile.write("property float y\n") resultsFile.write("property float z\n") resultsFile.write("property uchar red\n") resultsFile.write("property uchar green\n") resultsFile.write("property uchar blue\n") resultsFile.write("end_header\n") for line in points: resultsFile.write( "%f %f %f %d %d %d\n" % (line[0], line[1], line[2], line[3], line[4], line[5])) resultsFile.close() if numPoints > 0: compute3DStats(cfg, wellPath, wellName, chanName, cloudName, imgWidth, imgHeight) else: print( 'Well %s, channel %s (%s) - Skipping 3D stats since we have no points!' % (wellName, chanName, chanStr)) print ""