def run():
    global pmWin
    global imgData
    helpText = "This is Point Marker, " + \
               "a program for marking points in images.\n\n" + \
               ">> Press OK to select a file for storing points information.\n" + \
               ">> TIF images within the same directory will be auto-loaded."
    MessageDialog(IJ.getInstance(), "Point Marker Guide", helpText)
    fileChooser = OpenDialog("Point Marker: Choose working directory and file")
    outfilePath = fileChooser.getPath()
    imgDir = fileChooser.getDirectory()
    if not imgDir:
        return
    imgPaths = []
    for root, directories, filenames in os.walk(imgDir):
        for filename in filenames:
            if not filename.endswith(".tif"):
                continue
            imgPaths.append(os.path.join(root, filename))
    pointsTable1 = readPoints(outfilePath)
    imgData = PointMarkerData(imgPaths, outfilePath, pointsTable1)
    IJ.setTool("multipoint")
    PointRoi.setDefaultSize(3)
    pmWin = PointMarkerWin(imgData)
    pmWin.win.setLocation(IJ.getInstance().getLocation())
    prepareNewImage(imgData)
def open_image():
    """
    Opens an image; returns the ImagePlus, its file name without suffix,
    and its directory, in that order.
    """
    # Prompt the user for the input image file.
    print "open_image begin"
    op = OpenDialog("Choose input image...", "")
    if op.getPath() is None:
        sys.exit('User canceled dialog')
    # Open the selected image and prepare it for analysis.
    inputName = op.getFileName()
    inputDirPath = op.getDirectory()
    inputPath = inputDirPath + inputName
    # Strip the suffix off of the input image name.
    if inputName[-4] == ".":
        inputPrefix = inputName[:-4]  # assumes that a suffix exists
    else:
        inputPrefix = inputName
    # Open the image and return it.
    inputImp = ImagePlus(inputPath)
    print "open_image finish"
    return inputImp, inputPrefix, inputDirPath
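# Usage sketch (an assumption, not part of the original snippet): calling
# open_image() from Fiji's Script Editor and unpacking its three return
# values. It assumes the imports the function relies on (sys, ij.ImagePlus,
# ij.io.OpenDialog) are already in scope; variable names are illustrative.
imp, prefix, directory = open_image()
imp.show()
print "Opened", prefix, "from", directory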
def get_path():
    od = OpenDialog("Choose Spinning disk file", None)
    srcDir = od.getDirectory()
    if srcDir is None:
        # User canceled the dialog
        sys.exit(0)
    file = os.path.join(srcDir, od.getFileName())
    return srcDir, file
def main():
    from ij.io import OpenDialog
    od = OpenDialog("Select a slide scanner file")
    filepath = od.getPath()
    print(filepath)
    open_files_and_roi(filepath)
def input_file_location_chooser(default_directory, filt='*.tif', message=None):
    if message is None:
        message = 'Choose original file...'
    od = OpenDialog(message, default_directory, filt)
    file_path = od.getPath()
    if file_path is None:
        raise IOError('no input file chosen')
    return file_path
def file_location_chooser(default_directory):
    """choose the original input file location"""
    # input
    od = OpenDialog('Choose original file...', default_directory, '*.tif')
    file_path = od.getPath()
    if file_path is None:
        raise IOError('no input file chosen')
    return file_path
def openAtFolder(event):
    if 0 == path.getText().find("http"):
        IJ.showMessage("Can't open folder: it's a URL")
        return
    directory = os.path.dirname(path.getText())
    od = OpenDialog("Open", directory, None)
    filepath = od.getPath()
    if filepath:
        IJ.open(filepath)
def file_location_chooser(default_directory):
    """choose input data location"""
    od = OpenDialog('Choose original file...', default_directory, '*.tif')
    file_path = od.getPath()
    if file_path is None:
        raise IOError('no input file chosen')
    return file_path
def get_metadata_table_filepath():
    """Get the metadata template file path"""
    d = GenericDialog("Metadata Template")
    d.addMessage("Please choose a metadata template")
    d.enableYesNoCancel("OK", "Cancel")
    d.hideCancelButton()
    d.showDialog()
    if d.wasOKed():
        od = OpenDialog("Metadata File")
        return os.path.join(od.getDirectory(), od.getFileName())
    else:
        return
def select_file(self):
    global img_paths, opened_imgs
    path = OpenDialog('Select an image file').getPath()
    if path:
        shortened_path = path[path.rindex('/') + 1:]
        print(shortened_path)
        img_paths[shortened_path] = path
        opened_imgs.append(IJ.openImage(path))
        opened_imgs[-1].show()
        dropdown.addItem(shortened_path)
    else:
        print("file not selected/found")
def get_metadata(params):
    """get image metadata, either from the image file or from acquisition-time metadata"""
    if params.metadata_source == "Image metadata":
        try:
            reader = ImageReader()
            ome_meta = MetadataTools.createOMEXMLMetadata()
            reader.setMetadataStore(ome_meta)
            reader.setId(params.input_image_path)
            reader.close()
            params.setFrameInterval(
                ome_meta.getPixelsTimeIncrement(0).value())
            params.setIntervalUnit(
                ome_meta.getPixelsTimeIncrement(0).unit().getSymbol())
            params.setPixelPhysicalSize(
                ome_meta.getPixelsPhysicalSizeX(0).value())
            params.setPixelSizeUnit(
                ome_meta.getPixelsPhysicalSizeX(0).unit().getSymbol())
            params.setMetadataSourceFile(None)
        except Exception as e:
            print(e.message)
            mbui.warning_dialog([
                "There was a problem getting metadata from the image: ",
                e.message,
                "Please consider using acquisition metadata instead (click OK). ",
                "Or, quit the analysis run and investigate image metadata by hand. "])
            params.setMetadataSource("Acquisition metadata")
    if params.metadata_source == "Acquisition metadata":
        od = OpenDialog('Choose acquisition metadata file...',
                        os.path.dirname(params.input_image_path),
                        '*.txt')
        file_path = od.getPath()
        if file_path is None:
            raise IOError('no metadata file chosen')
        acq_metadata_dict = import_iq3_metadata(file_path)
        try:
            params.setFrameInterval(acq_metadata_dict['frame_interval'])
        except KeyError:
            params.setFrameInterval(1.0)
        try:
            params.setIntervalUnit(acq_metadata_dict['time_unit'])
        except KeyError:
            params.setIntervalUnit('frames')
        params.setPixelPhysicalSize(acq_metadata_dict['x_physical_size'])
        params.setPixelSizeUnit(acq_metadata_dict['x_unit'])
        params.setMetadataSourceFile(file_path)
    return params
def getThresholds():
    thresholds = {}
    gd = GenericDialog("Threshold options")
    gd.addChoice("How would you like to set your thresholds?",
                 ["default", "use threshold csv file"], "default")
    gd.showDialog()
    choice = gd.getNextChoice()
    log.write("Option: " + choice + "\n")
    if choice == "use threshold csv file":
        path = OpenDialog("Open the thresholds csv file")
        log.write("File used: " + path.getPath() + "\n")
        with open(path.getPath()) as csvfile:
            reader = csv.DictReader(csvfile)
            for row in reader:
                thresholds = row
    return thresholds
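# Illustrative thresholds CSV (an assumption; the real column names depend on
# the rest of this pipeline). csv.DictReader maps the header row onto each data
# row, and the loop above keeps the last row, so the file below would yield
# {'DAPI': '400', 'GFP': '650'} -- note that values come back as strings.
#
#   DAPI,GFP
#   400,650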
def getNames():
    names = {}
    gd = GenericDialog("Naming options")
    gd.addChoice("How would you like to name your results for each well?",
                 ["default", "use name convention csv file"], "default")
    gd.showDialog()
    choice = gd.getNextChoice()
    log.write("Option: " + choice + "\n")
    if choice == "use name convention csv file":
        path = OpenDialog("Open the names csv file")
        log.write("File used: " + path.getPath() + "\n")
        with open(path.getPath()) as csvfile:
            reader = csv.DictReader(csvfile)
            for row in reader:
                names[row['Row']] = row
    return names
def getNames():
    info = []
    gd = GenericDialog("Naming options")
    gd.addChoice("How would you like to output your results?",
                 ["default", "use information csv file"], "default")
    gd.showDialog()
    choice = gd.getNextChoice()
    log.write("Option: " + choice + "\n")
    if choice == "use information csv file":
        path = OpenDialog("Open the names csv file")
        log.write("File used: " + path.getPath() + "\n")
        with open(path.getPath()) as csvfile:
            reader = csv.DictReader(csvfile)
            for row in reader:
                info.append(row)
    return info
def run():
    # Choose a file to open
    od = OpenDialog("Choose multi-image file", None)
    srcDir = od.getDirectory()
    if srcDir is None:
        # User canceled the dialog
        return
    path = os.path.join(srcDir, od.getFileName())
    # Choose a directory to store each slice as a file
    targetDir = DirectoryChooser("Choose target directory").getDirectory()
    if targetDir is None:
        # User canceled the dialog
        return
    # Ready:
    cs = ChannelSeparator()
    cs.setId(path)
    print "cs", cs
    bf = BFVirtualStack(path, cs, False, False, False)
    for sliceIndex in xrange(1, bf.getSize() + 1):
        print "Processing slice", sliceIndex
        ip = bf.getProcessor(sliceIndex)
        sliceFileName = os.path.join(targetDir, str(sliceIndex) + ".tif")
        FileSaver(ImagePlus(str(sliceIndex), ip)).saveAsTiff(sliceFileName)
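# Note on the design: BFVirtualStack reads planes on demand, so only the
# current slice is held in memory while the loop runs. Optional variant
# (illustrative, not in the original): zero-pad the slice index so the
# exported TIFFs sort in numeric order in a file browser, e.g.
#     sliceFileName = os.path.join(targetDir, "%04d.tif" % sliceIndex)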
sys.path.append(os.path.realpath(imctool_dir))
import imctools.imagej.library as lib


def view_image5d_ome(img, ome_meta):
    """
    :param img: ImagePlus to display
    :param ome_meta: OME-XML metadata for the image
    :return:
    """
    nchannels = ome_meta.getChannelCount(0)
    channel_names = [ome_meta.getChannelName(0, i) for i in range(nchannels)]
    img = lib.get_image5d(imgName=ome_meta.getImageName(0),
                          img_stack=img.getStack(),
                          channel_names=channel_names)
    img.show()


def load_and_view(file_name):
    (imag, omeMeta) = lib.load_ome_img(file_name)
    view_image5d_ome(imag, omeMeta)


op = OpenDialog('Choose multichannel TIFF')
file = os.path.join(op.getDirectory(), op.getFileName())
load_and_view(file_name=file)
def processFile():
    # start logging
    IJ.log("\n______________________________\n\n\t\tOlympus DM correction\n\t\tVersion " + pluginVersion + "\n______________________________\n")

    # ask user for file
    ofd = OpenDialog("Choose a file", None)
    filename = ofd.getFileName()
    if filename is None:
        IJ.log("User canceled the dialog!\nImage processing canceled!\n")
        return
    directory = ofd.getDirectory()
    filepath = directory + filename
    IJ.log("File path: " + filepath)

    if not filename.endswith(".oir"):
        IJ.log("Not an Olympus (.oir) file.\nNo image to process.\n")
        return

    filenameExExt = os.path.splitext(filename)[0]

    # parse metadata
    reader = ImageReader()
    omeMeta = MetadataTools.createOMEXMLMetadata()
    reader.setMetadataStore(omeMeta)
    reader.setId(filepath)
    numChannels = reader.getSizeC()
    numSlices = reader.getSizeZ()
    numFrames = reader.getSizeT()
    seriesCount = reader.getSeriesCount()

    globalMetadata = reader.getGlobalMetadata()
    seriesMetadata = reader.getSeriesMetadata()

    objLensName = globalMetadata['- Objective Lens name #1']

    areaRotation = float(seriesMetadata['area rotation #1'])
    acquisitionValueRotation = float(seriesMetadata['acquisitionValue rotation #1'])
    if 'regionInfo rotation #1' in seriesMetadata:
        regionInfoRotation = float(seriesMetadata['regionInfo rotation #1'])
    else:
        regionInfoRotation = float(0)

    totalRotation = areaRotation + regionInfoRotation
    physSizeX = omeMeta.getPixelsPhysicalSizeX(0)
    physSizeY = omeMeta.getPixelsPhysicalSizeY(0)
    pxSizeX = physSizeX.value(UNITS.MICROM)
    pxSizeY = physSizeY.value(UNITS.MICROM)

    # log metadata
    IJ.log("\nMETADATA")
    #IJ.log("Filename: " + filepath)
    IJ.log("Number of series: " + str(seriesCount))
    IJ.log("Number of channels: " + str(numChannels))
    IJ.log("Number of frames: " + str(numFrames))
    IJ.log("Number of slices: " + str(numSlices))
    IJ.log("Objective lens: " + objLensName)
    IJ.log("FOV rotation: " + str(areaRotation))
    IJ.log("ROI rotation: " + str(regionInfoRotation))
    IJ.log("Total rotation: " + str(totalRotation))
    IJ.log("Pixel size:")
    IJ.log("\t\tX = " + str(physSizeX.value()) + " " + physSizeX.unit().getSymbol())
    IJ.log("\t\tY = " + str(physSizeY.value()) + " " + physSizeY.unit().getSymbol())

    # ask user to identify dichroic mirror used for each channel
    gdDM = GenericDialog("Dichroic mirrors")
    DMs = ["DM1", "DM2", "DM3", "DM4", "DM5"]
    for i in range(numChannels):
        gdDM.addChoice("Channel " + str(i+1), DMs, DMs[0])
    gdDM.addCheckbox("Merge channels", False)
    gdDM.showDialog()
    if gdDM.wasCanceled():
        IJ.log("User canceled the dialog!\nImage processing canceled!\n")
        return
    dichroics = []
    for i in range(numChannels):
        dichroics.append(gdDM.getNextChoice())
    merge = gdDM.getNextBoolean()
    IJ.log("\nUser selected dichroic mirrors")
    for i in range(numChannels):
        IJ.log("\t\tChannel " + str(i+1) + ": " + dichroics[i])

    if merge:
        channels = []
        chDict = {}
        for i in range(numChannels):
            chName = "Channel" + str(i+1)
            channels.append(chName)
            chDict[chName] = i
        channels.append("NONE")
        colourChoices = ["red", "green", "blue", "gray", "cyan", "magenta", "yellow"]
        gdMerge = GenericDialog("Merge channels")
        for c in colourChoices:
            gdMerge.addChoice(c + ":", channels, channels[numChannels])
        gdMerge.showDialog()
        if gdMerge.wasCanceled():
            IJ.log("User canceled the dialog!\nImage processing canceled!\n")
            return
        IJ.log("\nUser selected channel colours")
        mergeList = []
        for i in range(len(colourChoices)):
            ch = gdMerge.getNextChoice()
            if ch == "NONE":
                mergeList.append(None)
            else:
                mergeList.append(chDict[ch])
            IJ.log("\t\t" + colourChoices[i] + ": " + ch)

    # ask user for an output directory
    dc = DirectoryChooser("Choose folder for output")
    od = dc.getDirectory()
    if od is None:
        IJ.log("User canceled the dialog!\nImage processing canceled!\n")
        return

    if merge:
        tifDir = od + "." + str(datetime.now()).replace(" ", "").replace(":", "") + "/"
        if not os.path.exists(tifDir):
            os.makedirs(tifDir)
            IJ.log("\nCreated temporary folder: " + tifDir + "\n")
        else:
            IJ.log("Unable to create temporary folder!\n")
    else:
        tifDir = od + filenameExExt + "/"
        if not os.path.exists(tifDir):
            os.makedirs(tifDir)
            IJ.log("\nCreated subfolder: " + tifDir + "\n")
        else:
            IJ.log("\nSubfolder " + tifDir + " already exists")

    # correct images
    tifFilePaths = []
    for i in range(numChannels):
        ip = extractChannel(oirFile=filepath, ch=i)
        if dichroics[i] == "DM1":
            IJ.log("Channel " + str(i+1) + " was imaged using DM1, so no correction required.")
        else:
            offsets = getOffset(obj=objLensName, dm=dichroicDict[dichroics[i]])
            xom = offsets['x']
            yom = offsets['y']
            if abs(totalRotation) > 0.1:
                rotOff = rotateOffset(x=xom, y=yom, angle=-totalRotation)
                xom = rotOff['x']
                yom = rotOff['y']
            xop = int(round(xom / pxSizeX))
            yop = int(round(yom / pxSizeY))
            IJ.log("Channel " + str(i+1) + " offsets")
            IJ.log("\t\tMicrometres")
            IJ.log("\t\t\t\tx = " + str(xom))
            IJ.log("\t\t\t\ty = " + str(yom))
            IJ.log("\t\tPixels")
            IJ.log("\t\t\t\tx = " + str(xop))
            IJ.log("\t\t\t\ty = " + str(yop))
            IJ.run(ip, "Translate...", "x=" + str(-xop) + " y=" + str(-yop) + " interpolation=None stack")

        tifFilePath = tifDir + filenameExExt + "_ch_" + str(i+1) + ".tif"
        tifFilePaths.append(tifFilePath)
        if os.path.exists(tifFilePath):
            IJ.log("\nOutput file exists: " + tifFilePath)
            IJ.log("Rerun plugin choosing a different output folder")
            IJ.log("or delete file and then rerun plugin.")
            IJ.log("Image processing terminated!\n")
            return
        FileSaver(ip).saveAsTiff(tifFilePath)

    if merge:
        for i in range(len(mergeList)):
            if mergeList[i] != None:
                mergeList[i] = readSingleChannelImg(tifFilePaths[mergeList[i]])
        merged = RGBStackMerge.mergeChannels(mergeList, False)
        mergedChannelFilepath = od + filenameExExt + ".tif"
        if os.path.exists(mergedChannelFilepath):
            IJ.log("\nOutput file exists: " + mergedChannelFilepath)
            IJ.log("Rerun plugin choosing a different output folder")
            IJ.log("or delete file and then rerun plugin.")
            IJ.log("Image processing terminated!\n")
        FileSaver(merged).saveAsTiff(mergedChannelFilepath)
        for tf in tifFilePaths:
            os.remove(tf)
        os.rmdir(tifDir)

    IJ.log("\nFinished processing file:\n" + filepath + "\n")
    if merge:
        IJ.log("Image file with channels aligned:\n" + od + filenameExExt + ".tif\n")
    else:
        IJ.log("Aligned images (one tiff file for each channel) can be found in:\n" + tifDir + "\n")
if __name__ == '__main__':

    print("#\n# Elastix registration\n#")

    #
    # GET PARAMETERS
    #
    print("#\n# Parameters\n#")

    #
    # Load gui parameters
    #
    od = OpenDialog("Select parameter file (press CANCEL if you don't have one)", None)
    f = od.getPath()

    if f:
        print('loading parameters from file')
        f = open(f, 'r')
        p_gui = pickle.load(f)
        f.close()
    else:
        print('starting from default parameters')
        # make parameter structure if it has not been loaded
        p_gui = {}
        # exposed to GUI
        p_gui['expose_to_gui'] = {'value': ['input_folder', 'reg_exp', 'HDF5_data_set_name',
                                            'output_folder', 'output_format', 'output_bit_depth',
                                            'map_to_zero', 'map_to_max', 'binning',
                                            'binning_x', 'binning_y', 'binning_z',
                                            'save_xyz_projections', 'save_volume_data']}
        p_gui['input_folder'] = {'choices': '', 'value': 'C:\\Users\\acquifer\\Desktop\\882-reg3', 'type': 'folder'}
for spot in ls:
    values = [spot.ID(), trackID, spot.getFeature('FRAME'),
              spot.getFeature('POSITION_X'), spot.getFeature('POSITION_Y'),
              spot.getFeature('POSITION_Z')]
    for i in range(nChannels):
        values.append(spot.getFeature('MEAN_INTENSITY%02d' % (i+1)))
    IJ.log(rowStr % tuple(values))
    l1 = (values[0], values[1], values[2], values[3], values[4], values[5], values[7], values[8])
    wr.writerow(l1)

myfile.close()
IJ.selectWindow("Merged")
IJ.run("Close")

od = OpenDialog("Time Lapse Images", "")
firstDir = od.getDirectory()

fileList = os.listdir(firstDir)
if "DisplaySettings.json" in fileList:
    fileList.remove("DisplaySettings.json")
if ".DS_Store" in fileList:
    fileList.remove(".DS_Store")

totalCount = []
i = 1
for fileName in fileList:
    currentFile = firstDir + fileName
    print(firstDir)
    IJ.run("Bio-Formats Importer",
           "open=" + currentFile + " autoscale color_mode=Composite view=Hyperstack stack_order=XYCZT")
    track()
#filename = os.path.basename(srcpath)
# print(filename)
# parentdirectory = os.path.dirname(srcpath)
#basename = os.path.splitext(filename)[0]
# print(basename)
#imp = IJ.openImage(srcpath)
# imp = IJ.getImage()

CELLAREA = 36.7        # in um2
PIXSIZE = 0.307
CELLDENSITY = .000878  # cells per um2
CHANNEL = 3            # red channel
TRIALS = 5             # number of simulations

# user chooses the file
od = OpenDialog("Choose multi-image file", None)
srcDir = od.getDirectory()
# print("directory = "+srcDir)
filename = od.getFileName()
# print("file = "+filename)
path = os.path.join(srcDir, od.getFileName())
basename = os.path.splitext(filename)[0]
# print("base = "+basename)
# print("path = "+path)

# get the target image
imp = IJ.openImage(path)
imp = IJ.getImage()

# get image size, dimensions, nChannels
width = imp.getWidth()
from ij import IJ, ImagePlus  # load the FIJI modules
from ij.io import FileSaver
from ij.io import OpenDialog
import os
#from ij.measure import ResultsTable
from fiji.threshold import Auto_Threshold

IJ.run("Clear Results")

od = OpenDialog("Choose a file", None)
filename = od.getFileName()
directory = od.getDirectory()
path = od.getPath()
print filename
print directory
print path

imp = IJ.openImage(path)
imp.show()
IJ.run(imp, "8-bit", "")
imp = IJ.getImage()
hist = imp.getProcessor().getHistogram()
lowTH = Auto_Threshold.Otsu(hist)
print lowTH
imp.getProcessor().threshold(lowTH)
# pulled from http://wiki.cmci.info/documents/120206pyip_cooking/python_imagej_cookbook#pluginauto_threshold
#imp2 = IJ.getImage()
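# Possible continuation (an assumption; the original snippet stops here):
# apply the Otsu level as an ImageJ threshold and convert the image to a
# binary mask for downstream measurement.
IJ.setThreshold(imp, lowTH, 255)
IJ.run(imp, "Convert to Mask", "")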
from ij import IJ
from ij.io import OpenDialog
from ij.io import FileSaver
from ij.io import DirectoryChooser
from ij.gui import WaitForUserDialog
from ij.plugin import MontageMaker
import csv
import os

empty = "/home/bic/rthomas/Desktop/Link to 12CellLinesPaper/emptymontage.tif"
files = []

dc = DirectoryChooser("Choose an input directory")
inputDirectory = dc.getDirectory()

dc = DirectoryChooser("Select an output directory")
outputDirectory = dc.getDirectory()

path = OpenDialog("Open the filenames csv file")
with open(path.getPath()) as csvfile:
    reader = csv.DictReader(csvfile)
    for row in reader:
        files.append(row)

for sets in files:
    name = ""
    for filename in sets:
        if os.path.isfile(inputDirectory + filename):
            imp = IJ.openImage(inputDirectory + filename)
        else:
            imp = IJ.openImage(empty)
        imp.show()
from __future__ import print_function

import os

from ij import IJ, WindowManager
from ij.io import OpenDialog
#from loci.plugins import BF

#path = '/Users/cudmore/box/data/sami/Cell_1/1_5ADVMLEG1L1.oir'
path = '/Users/cudmore/box/data/sami/200108/WT_Female/Cell_8/8_5ADVMLEG1L1_ch2.tif'

# ask user for file. I do not know how to handle it when the user hits cancel???
# Script will just fail
if path is None or not os.path.isfile(path):
    notUsedBy_oxs = 'Open a _ch2.tif file'
    path = OpenDialog(notUsedBy_oxs).getPath()
    if path is None:
        exit()

print(' user selected path:', path)

fileName = os.path.basename(path)

# load
imp = IJ.openImage(path)
imp.show()

# save channel 1
windowName = 'C1-' + fileName
IJ.selectWindow(windowName)
windowName_notSure = WindowManager.getImage(windowName)
table.getInputMap().put(KeyStroke.getKeyStroke(KeyEvent.VK_ENTER, 0), "enter")
table.getActionMap().put("enter", OpenImageFromTableCell())
# return model, table, regex_field, frame


def launch(model):
    def run():
        makeUI(model)
    return run


if txt_file is None:
    od = OpenDialog("Choose a text file listing file paths")
    txt_file = od.getPath()

if txt_file:
    model = TableModel(txt_file)
    SwingUtilities.invokeLater(launch(model))

# FOR THE I2K WORKSHOP:
# Enable changing text font size in all components by control+shift+(plus|equals)/minus
components = []
tables = []
frames = []


def addFontResizing():
    global frames
from ij.gui import Plot, Arrow
from jarray import array
from ij.io import OpenDialog
import csv
from org.apache.commons.lang import ArrayUtils
import java.awt.Color

scale = 5

od = OpenDialog("Choose a file", "")
folder = od.getDirectory()
fName = od.getFileName()
path = folder + fName

x1 = []
y1 = []
x2 = []
y2 = []
dx = []
dy = []

#Draw Circles radii 30
cx = []
cy = []
for x in range(-7, 8, 1):
    for y in range(-7, 8, 1):
        if ((x**2) + (y**2)) <= (7.5**2):
            cx.append(x)
            cy.append(y)

fReader = csv.reader(open(path), delimiter=",")
              '[Folder ' + str(i) + ' of ' + str(numFolders) + '] with ' +
              str(numTif) + ' tif files in folder ' + folder, 1)
    for tif in tifList:
        bPrintLog(tif, 2)

    # do it
    bPrintLog('=== bALignBatchBatch is starting alignment on all stacks in folder: ' + folder, 2)
    #bALignBatch.runOneFolder(folder)
    i += 1

bPrintLog('bAlignBatchBatch done at', 0)

#
if __name__ == '__main__':
    srcDir = OpenDialog('Select a text file with your folders to batch process')
    print srcDir
    if srcDir:
        print srcDir.getDirectory()
        print srcDir.getFileName()
        fullPath = srcDir.getDirectory() + srcDir.getFileName()
        opentextfile(fullPath)
        # run()
    else:
        bPrintLog('Cancelled by user', 0)
def generateDeconvolutionScriptUI(srcDir, tgtDir, calibration,
                                  preCropAffines, ROI, postCropAffines):
    """
    Open a UI to automatically generate a script to:
    1. Register the views of each time point TM folder, and deconvolve them.
    2. Register deconvolved time points to each other, for a range of consecutive time points.

    Will ask for the file path to the kernel file, and also for the range of time points
    to process, and for the deconvolution iterations for CM00-CM01 and CM02-CM03.
    """
    template = """
# AUTOMATICALLY GENERATED - %s

import sys, os
sys.path.append("%s")
from lib.isoview import deconvolveTimePoints
from mpicbg.models import RigidModel3D, TranslationModel3D
from net.imglib2.img.display.imagej import ImageJFunctions as IL

# The folder with the sequence of TM\d+ folders, one per time point in the 4D series.
# Each folder should contain 4 KLB files, one per camera view of the IsoView microscope.
srcDir = "%s"

# A folder to save deconvolved images in, and CSV files describing features, point matches and transformations
targetDir = "%s"

# Path to the volume describing the point spread function (PSF)
kernelPath = "%s"

calibration = [%s]  # An array with 3 floats (identity--all 1.0--because the coarse affines, that is,
                    # the camera transformations, already include the scaling to isotropy computed using the original calibration.

# The transformations of each timepoint onto the camera at index zero.
def cameraTransformations(dims0, dims1, dims2, dims3, calibration):
    return {
        0: [%s],
        1: [%s],
        2: [%s],
        3: [%s]
    }

# Deconvolution parameters
paramsDeconvolution = {
    "blockSizes": None,  # None means the image size + kernel size. Otherwise specify like e.g. [[128, 128, 128]] for img in images]
    "CM_0_1_n_iterations": %i,
    "CM_2_3_n_iterations": %i,
}

# Joint dictionary of parameters
params = {}
params.update(paramsDeconvolution)

# A region of interest for each camera view, for cropping after registration but prior to deconvolution
roi = ([%s],  # array of 3 integers, top-left coordinates
       [%s])  # array of 3 integers, bottom-right coordinates

# All 4 cameras relative to CM00
fineTransformsPostROICrop = \\
    [[1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0],
     [%s],
     [%s],
     [%s]]

deconvolveTimePoints(srcDir, targetDir, kernelPath, calibration,
                     cameraTransformations, fineTransformsPostROICrop,
                     params, roi, fine_fwd=True, subrange=range(%i, %i))
"""

    od = OpenDialog("Choose kernel file", srcDir)
    kernel_path = od.getPath()
    if not kernel_path:
        JOptionPane.showMessageDialog(None, "Can't proceed without a filepath to the kernel",
                                      "Alert", JOptionPane.ERROR_MESSAGE)
        return

    panel = JPanel()
    panel.setBorder(BorderFactory.createEmptyBorder(10, 10, 10, 10))
    gb = GridBagLayout()
    panel.setLayout(gb)
    gc = GBC()

    msg = ["Edit parameters, then push the button",
           "to generate a script that, when run,",
           "will execute the deconvolution for each time point",
           "saving two 3D stacks per time point as ZIP files",
           "in the target directory under subfolder 'deconvolved'.",
           "Find the script in a new Script Editor window.",
           " "]
    gc.gridy = -1  # init
    for line in msg:
        label = JLabel(line)
        gc.anchor = GBC.WEST
        gc.gridx = 0
        gc.gridy += 1
        gc.gridwidth = 2
        gc.gridheight = 1
        gb.setConstraints(label, gc)
        panel.add(label)

    strings = [["Deconvolution iterations",
                "CM_0_1_n_iterations", "CM_2_3_n_iterations"],
               ["Range",
                "First time point", "Last time point"]]
    params = {"CM_0_1_n_iterations": 5,
              "CM_2_3_n_iterations": 7,
              "First time point": 0,
              "Last time point": -1}  # -1 means last
    insertFloatFields(panel, gb, gc, params, strings)

    def asString(affine):
        matrix = zeros(12, 'd')
        affine.toArray(matrix)
        return ",".join(imap(str, matrix))

    def generateScript(event):
        script = template % (str(datetime.now()),
                             filter(lambda path: path.endswith("IsoView-GCaMP"), sys.path)[-1],
                             srcDir,
                             tgtDir,
                             kernel_path,
                             ", ".join(imap(str, calibration)),
                             asString(preCropAffines[0]),
                             asString(preCropAffines[1]),
                             asString(preCropAffines[2]),
                             asString(preCropAffines[3]),
                             params["CM_0_1_n_iterations"],
                             params["CM_2_3_n_iterations"],
                             ", ".join(imap(str, ROI[0])),
                             ", ".join(imap(str, ROI[1])),
                             asString(postCropAffines[1]),
                             asString(postCropAffines[2]),
                             asString(postCropAffines[3]),
                             params["First time point"],
                             params["Last time point"])
        tab = None
        for frame in JFrame.getFrames():
            if str(frame).startswith("org.scijava.ui.swing.script.TextEditor["):
                try:
                    tab = frame.newTab(script, "python")
                    break
                except:
                    print sys.exc_info()
        if not tab:
            try:
                now = datetime.now()
                with open(os.path.join(System.getProperty("java.io.tmpdir"),
                                       "script-%i-%i-%i_%i:%i.py" % (now.year, now.month, now.day,
                                                                     now.hour, now.minute)), 'w') as f:
                    f.write(script)
            except:
                print sys.exc_info()
            print script

    gen = JButton("Generate script")
    gen.addActionListener(generateScript)
    gc.anchor = GBC.CENTER
    gc.gridx = 0
    gc.gridy += 1
    gc.gridwidth = 2
    gb.setConstraints(gen, gc)
    panel.add(gen)

    frame = JFrame("Generate deconvolution script")
    frame.setDefaultCloseOperation(JFrame.DISPOSE_ON_CLOSE)
    frame.getContentPane().add(panel)
    frame.pack()
    frame.setLocationRelativeTo(None)  # center in the screen
    frame.setVisible(True)
#see https://www.ini.uzh.ch/~acardona/fiji-tutorial/#s1 for help!!
#labels can be set using roiManager
#use with genFIJI_ROIs.m

from ij import IJ
from ij.gui import Roi, OvalRoi
from ij.plugin.frame import RoiManager
from ij.io import OpenDialog
import os

roiManager = RoiManager.getRoiManager()

fileSelection = OpenDialog("Select the positions file")
fileString = IJ.openAsString(fileSelection.getPath())
dataRows = fileString.split("\n")
#print fileString
#print fileSelection.getPath(),
#print fileSelection.getFileName()

#Use names as labels in roiManager menu (more>>>)
#Save manually as well

imp = IJ.getImage()

rowIter = 0
for row in dataRows:
    if len(row) > 0:
        spotDataList = row.split("\t")
        spotData = [float(x) for x in spotDataList]
        topLeftX = spotData[1] - spotData[4]
        topLeftY = spotData[2] - spotData[4]
        diameter = 2 * spotData[4]
        spotRoi = OvalRoi(topLeftX, topLeftY, diameter, diameter)
def getFile():
    od = OpenDialog("Choose image file", None)
    filename = od.getFileName()
    directory = od.getDirectory()
    filepath = directory + filename
    return filepath
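# Usage sketch (an assumption, not part of the original snippet): open and
# display the chosen file with ImageJ.
from ij import IJ
imp = IJ.openImage(getFile())
imp.show()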
def regBf(fn=None, imp=None, refId=None):
    """
    Register a time series stack to a specified reference slice,
    from a file (imported by Bio-Formats) or a stack ImagePlus.
    Returns a registered ImagePlus.
    The stack must have only 1 z layer.
    refId is in the format of [int channel, int slice, int frame].
    If no refId is supplied, the first slice [1, 1, 1] will be used.

    Note: since TurboReg is used for registration, there will be
    temporarily opened image windows.
    """

    ## Prepare the right ImagePlus
    if imp is None:
        if fn is None:
            od = OpenDialog("Choose a file", None)
            filename = od.getFileName()
            if filename is None:
                print "User canceled the dialog!"
                return
            else:
                directory = od.getDirectory()
                filepath = directory + filename
                print "Selected file path:", filepath
        else:
            if os.path.exists(fn) and os.path.isfile(fn):
                filepath = fn
            else:
                print "File does not exist!"
                return
        imps = BF.openImagePlus(filepath)
        imp = imps[0]
        if imp is None:
            print "Cannot load file!"
            return
    else:
        if fn is not None:
            print "File or ImagePlus? Cannot load both."
            return

    width = imp.getWidth()
    height = imp.getHeight()
    # C
    nChannels = imp.getNChannels()
    # Z
    nSlices = imp.getNSlices()
    # T
    nFrames = imp.getNFrames()
    # pixel size
    calibration = imp.getCalibration()

    # Only support one z layer
    if nSlices != 1:
        print "Only support 1 slice at Z dimension."
        return

    # set registration reference slice
    if refId is None:
        refC = 1
        refZ = 1
        refT = 1
    else:
        refC = refId[0]
        refZ = refId[1]
        refT = refId[2]
    if (refC not in range(1, nChannels + 1) or
            refZ not in range(1, nSlices + 1) or
            refT not in range(1, nFrames + 1)):
        print "Invalid reference image!"
        return

    stack = imp.getImageStack()
    registeredStack = ImageStack(width, height, nChannels * nFrames * nSlices)

    # setup windows, these are needed by TurboReg
    tmpip = FloatProcessor(width, height)
    refWin = ImageWindow(ImagePlus("ref", tmpip))
    bounds = refWin.getBounds()
    # refWin.setVisible(False)
    toRegWin = ImageWindow(ImagePlus("toReg", tmpip))
    toRegWin.setLocation(bounds.width + bounds.x, bounds.y)
    # toRegWin.setVisible(False)
    toTransformWin = ImageWindow(ImagePlus("toTransform", tmpip))
    toTransformWin.setLocation(2 * bounds.width + bounds.x, bounds.y)
    # toTransformWin.setVisible(False)

    # get reference image
    refImp = ImagePlus("ref", stack.getProcessor(imp.getStackIndex(refC, refZ, refT)))
    refWin.setImage(refImp)

    tr = TurboReg_()

    for t in xrange(1, nFrames + 1):
        IJ.showProgress(t - 1, nFrames)
        # print "t ", t

        # do TurboReg on reference channel
        toRegId = imp.getStackIndex(refC, refZ, t)
        toRegImp = ImagePlus("toReg", stack.getProcessor(toRegId))
        toRegWin.setImage(toRegImp)

        regArg = "-align " +\
                 "-window " + toRegImp.getTitle() + " " +\
                 "0 0 " + str(width - 1) + " " + str(height - 1) + " " +\
                 "-window " + refImp.getTitle() + " " +\
                 "0 0 " + str(width - 1) + " " + str(height - 1) + " " +\
                 "-rigidBody " +\
                 str(width / 2) + " " + str(height / 2) + " " +\
                 str(width / 2) + " " + str(height / 2) + " " +\
                 "0 " + str(height / 2) + " " +\
                 "0 " + str(height / 2) + " " +\
                 str(width - 1) + " " + str(height / 2) + " " +\
                 str(width - 1) + " " + str(height / 2) + " " +\
                 "-hideOutput"

        tr = TurboReg_()
        tr.run(regArg)
        registeredImp = tr.getTransformedImage()
        sourcePoints = tr.getSourcePoints()
        targetPoints = tr.getTargetPoints()

        registeredStack.setProcessor(registeredImp.getProcessor(), toRegId)
        # toRegImp.flush()

        # apply transformation on other channels
        for c in xrange(1, nChannels + 1):
            # print "c ", c
            if c == refC:
                continue

            toTransformId = imp.getStackIndex(c, 1, t)
            toTransformImp = ImagePlus("toTransform", stack.getProcessor(toTransformId))
            toTransformWin.setImage(toTransformImp)

            transformArg = "-transform " +\
                           "-window " + toTransformImp.getTitle() + " " +\
                           str(width) + " " + str(height) + " " +\
                           "-rigidBody " +\
                           str(sourcePoints[0][0]) + " " +\
                           str(sourcePoints[0][1]) + " " +\
                           str(targetPoints[0][0]) + " " +\
                           str(targetPoints[0][1]) + " " +\
                           str(sourcePoints[1][0]) + " " +\
                           str(sourcePoints[1][1]) + " " +\
                           str(targetPoints[1][0]) + " " +\
                           str(targetPoints[1][1]) + " " +\
                           str(sourcePoints[2][0]) + " " +\
                           str(sourcePoints[2][1]) + " " +\
                           str(targetPoints[2][0]) + " " +\
                           str(targetPoints[2][1]) + " " +\
                           "-hideOutput"

            tr = TurboReg_()
            tr.run(transformArg)
            registeredStack.setProcessor(tr.getTransformedImage().getProcessor(), toTransformId)
            # toTransformImp.flush()

        sourcePoints = None
        targetPoints = None

        IJ.showProgress(t, nFrames)
        IJ.showStatus("Frames registered: " + str(t) + "/" + str(nFrames))

    refWin.close()
    toRegWin.close()
    toTransformWin.close()

    imp2 = ImagePlus("reg_" + imp.getTitle(), registeredStack)
    imp2.setCalibration(imp.getCalibration().copy())
    imp2.setDimensions(nChannels, nSlices, nFrames)
    # print "type ", imp.getType()
    # print "type ", imp2.getType()
    # print nChannels, " ", nSlices, " ", nFrames
    # print registeredStack.getSize()
    for key in imp.getProperties().stringPropertyNames():
        imp2.setProperty(key, imp.getProperty(key))
    # comp = CompositeImage(imp2, CompositeImage.COLOR)
    # comp.show()
    # imp2 = imp.clone()
    # imp2.setStack(registeredStack)
    # imp2.setTitle("reg"+imp.getTitle())
    # imp2.show()
    # imp.show()

    return imp2
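# Usage sketch (an assumption, not part of the original function): register a
# single-z time series from a hypothetical file path, using channel 1, slice 1,
# frame 1 as the reference, then display the result.
regImp = regBf(fn="/path/to/timeseries.tif", refId=[1, 1, 1])
if regImp is not None:
    regImp.show()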