def get_calibration_from_metadata(path_to_image):
    """get the pixel calibration from a given image using Bio-Formats

    Parameters
    ----------
    path_to_image : str
        full path to the input image

    Returns
    -------
    array
        the physical px size as float for x,y,z
    """
    reader = ImageReader()
    omeMeta = MetadataTools.createOMEXMLMetadata()
    reader.setMetadataStore(omeMeta)
    try:
        reader.setId(str(path_to_image))
        # physical size of the first (index 0) image series
        # NOTE(review): assumes all three axes carry a physical size;
        # .value() on a missing axis would raise — confirm for 2D inputs
        physSizeX = omeMeta.getPixelsPhysicalSizeX(0)
        physSizeY = omeMeta.getPixelsPhysicalSizeY(0)
        physSizeZ = omeMeta.getPixelsPhysicalSizeZ(0)
        image_calibration = [physSizeX.value(),
                             physSizeY.value(),
                             physSizeZ.value()]
    finally:
        # the original leaked the file handle on error; always close
        reader.close()
    return image_calibration
def DirList(baseDir): r = ImageReader() imgStats = {} for root, dirs, files in os.walk(str(baseDir)): for f1 in files: if f1.endswith(".jpg") or f1.endswith(".jpe") or f1.endswith( ".jpeg"): id = root + "/" + f1 r.setId(id) if r is None: print "Couldn\'t open image from file:", id continue w = r.getSizeX() h = r.getSizeY() imgStats[str(w) + "_" + str(h)] = imgStats.get(str(w) + "_" + str(h), 0) + 1 IJ.log("Found image: " + str(id)) #counter += 1 r.close() #print summary summary = '' for k, v in imgStats.iteritems(): dim = k.split("_") ratio = float(dim[0]) / float(dim[1]) IJ.log("Found " + str(v) + " images of dimension " + str(dim[0]) + "x" + str(dim[1]) + " apect ratio " + str(round(ratio, 2))) summary = summary + "\nFound " + str( v) + " images of dimension " + str(dim[0]) + "x" + str( dim[1]) + " apect ratio " + str(round(ratio, 2)) return summary
def get_reader(file, inputMeta):
    """Return a Bio-Formats ImageReader for *file* with *inputMeta*
    attached as its metadata store.

    Parameters
    ----------
    file : str
        path to the image file
    inputMeta : MetadataStore
        OME metadata store to populate during setId()

    Returns
    -------
    ImageReader
        an open reader; the caller is responsible for closing it

    Note: the original also called BF.openImagePlus() and discarded the
    result, needlessly decoding all pixel data; that call was removed.
    """
    reader = ImageReader()
    reader.setMetadataStore(inputMeta)
    reader.setId(file)
    return reader
def getFrameIntervalFromImage(image_path):
    """Read the time increment (frame interval) of the first image
    series from the file's OME metadata via Bio-Formats.

    Parameters
    ----------
    image_path : str

    Returns
    -------
    float
        the time increment value (in whatever unit the file declares)
    """
    from loci.formats import ImageReader
    from loci.formats import MetadataTools
    r = ImageReader()
    meta = MetadataTools.createOMEXMLMetadata()
    r.setMetadataStore(meta)
    try:
        r.setId(image_path)
        frame_interval = meta.getPixelsTimeIncrement(0).value()
    finally:
        # the original never closed the reader — file-handle leak
        r.close()
    log("Detected frame rate: %s (%s)" % (frame_interval, image_path))
    return frame_interval
def readImageFile(imageFile):
    """Open *imageFile* and return (reader, options).

    Metamorph ``.nd`` files are handled by the dedicated
    MetamorphReader; every other format goes through the generic
    Bio-Formats ImageReader.
    """
    #IJ.log(imageFile)
    #print(imageFile)
    # last dot-separated component serves as the file extension
    extension = imageFile.split('.')[-1]
    importOpts = ImporterOptions()
    importOpts.setId(imageFile)
    reader = MetamorphReader() if extension == "nd" else ImageReader()
    reader.setId(imageFile)
    return reader, importOpts
def meta_parser(): """ Iterates through .lif XML/OME metadata, returns selected values eg. timepoints, channels, series count, laser power.. """ # Get metadata. reader = ImageReader() omeMeta = MetadataTools.createOMEXMLMetadata() reader.setMetadataStore(omeMeta) reader.setId(str(Experiment)) # Extracts number of image series, channel number seriesCount = reader.getSeriesCount() channels = reader.getSizeC() #reader.close() # Number of images imageCount = omeMeta.getImageCount() # Image size in pixels AND microns (for scalebar). Physical_x = omeMeta.getPixelsPhysicalSizeX(0) Pixel_x = omeMeta.getPixelsSizeX(0) Physical_x = Physical_x.value() Pixel_x = Pixel_x.getNumberValue() # Assumes square image (x=y). org_size = (Physical_x*Pixel_x)*2 # Laser power of donor excitation laser. if channels == 3: LP = omeMeta.getChannelLightSourceSettingsAttenuation(0,0) LP = 1 - LP.getNumberValue() else: LP = 0 timelist = [] for timepoint in range (imageCount): times = omeMeta.getImageAcquisitionDate(timepoint) timelist.append(times.toString()) # YY.MM... to minutes. timelist =[ time.mktime(time.strptime(times, u'%Y-%m-%dT%H:%M:%S')) for times in timelist ] timelist_unsorted =[ (times - timelist[0])/60 for times in timelist ] timelist = sorted(timelist_unsorted) # Prints to log. IJ.log("Total # of image series (from BF reader): " + str(seriesCount)) IJ.log("Total # of image series (from OME metadata): " + str(imageCount)) IJ.log("Total # of channels (from OME metadata): " + str(channels)) IJ.log("Laserpower (from OME metadata): " + str(LP)) return channels, seriesCount, timelist, timelist_unsorted, LP, org_size
def save_ome_tiff(filename, image, metadata):
    """Write *image* (an ImagePlus) to *filename* as OME-TIFF.

    Each channel of the stack is converted to big-endian bytes and
    saved as one plane of series 0.

    Parameters
    ----------
    filename : str
        output path
    image : ImagePlus
        image whose channels are written
    metadata : MetadataRetrieve
        OME metadata describing the output file
    """
    # the original created an ImageReader here and never used it — removed
    writer = ImageWriter()
    writer.setMetadataRetrieve(metadata)
    writer.setId(filename)
    try:
        nchan = image.getNChannels()
        stack = image.getImageStack()
        print(image.getStackSize())
        writer.setSeries(0)  # hoisted: the series never changes in the loop
        for i in range(nchan):
            process = stack.getProcessor(i + 1)
            pixels = process.getPixels()
            # True -> big-endian byte order
            pixels = DataTools.floatsToBytes(pixels, True)
            writer.saveBytes(i, pixels)
    finally:
        # close even if a plane fails to write
        writer.close()
def get_metadata(imagefile, imageID=0):
    """Read basic OME/Bio-Formats metadata for *imagefile*.

    Parameters
    ----------
    imagefile : str
        path to the image file
    imageID : int
        image (series) index inside the file, default 0

    Returns
    -------
    OrderedDict
        keys: Extension-free metadata — image/series counts, TZCXY
        sizes, is3d flag and XYZ scaling (None where the file has no
        physical size for an axis)
    """
    metainfo = {}
    # initialize the reader and get the OME metadata
    reader = ImageReader()
    omeMeta = MetadataTools.createOMEXMLMetadata()
    reader.setMetadataStore(omeMeta)
    reader.setId(imagefile)
    # BUGFIX: the image count must be read *after* setId(); before that
    # the metadata store is still empty and the count was always 0
    metainfo['ImageCount_OME'] = omeMeta.getImageCount()
    metainfo['SeriesCount_BF'] = reader.getSeriesCount()
    reader.close()
    # read dimensions TZCXY from OME metadata
    metainfo['SizeT'] = omeMeta.getPixelsSizeT(imageID).getValue()
    metainfo['SizeZ'] = omeMeta.getPixelsSizeZ(imageID).getValue()
    metainfo['SizeC'] = omeMeta.getPixelsSizeC(imageID).getValue()
    metainfo['SizeX'] = omeMeta.getPixelsSizeX(imageID).getValue()
    metainfo['SizeY'] = omeMeta.getPixelsSizeY(imageID).getValue()
    # store info about stack (more than one z-slice -> 3D)
    metainfo['is3d'] = metainfo['SizeZ'] > 1
    # get the scaling for XYZ; each axis is guarded independently so a
    # missing Y size no longer raises while X is present
    physSizeX = omeMeta.getPixelsPhysicalSizeX(0)
    physSizeY = omeMeta.getPixelsPhysicalSizeY(0)
    physSizeZ = omeMeta.getPixelsPhysicalSizeZ(0)
    metainfo['ScaleX'] = round(physSizeX.value(), 3) if physSizeX is not None else None
    metainfo['ScaleY'] = round(physSizeY.value(), 3) if physSizeY is not None else None
    metainfo['ScaleZ'] = round(physSizeZ.value(), 3) if physSizeZ is not None else None
    # sort the dictionary by key for stable iteration order
    metainfo = OrderedDict(sorted(metainfo.items()))
    return metainfo
def get_metadata(params):
    """get image metadata, either from the image file or from
    acquisition-time metadata

    Parameters
    ----------
    params : parameter object (project type)
        must expose metadata_source, input_image_path and the various
        set* mutators used below

    Returns
    -------
    params, with frame interval, pixel size and their units filled in
    """
    if params.metadata_source == "Image metadata":
        try:
            # read OME metadata straight from the image file
            reader = ImageReader()
            ome_meta = MetadataTools.createOMEXMLMetadata()
            reader.setMetadataStore(ome_meta)
            reader.setId(params.input_image_path)
            reader.close()
            params.setFrameInterval(
                ome_meta.getPixelsTimeIncrement(0).value())
            params.setIntervalUnit(
                ome_meta.getPixelsTimeIncrement(0).unit().getSymbol())
            params.setPixelPhysicalSize(
                ome_meta.getPixelsPhysicalSizeX(0).value())
            params.setPixelSizeUnit(
                ome_meta.getPixelsPhysicalSizeX(0).unit().getSymbol())
            params.setMetadataSourceFile(None)
        except Exception as e:
            # fall through to the acquisition-metadata branch below by
            # flipping the source; the user is warned first
            print(e.message)
            mbui.warning_dialog([
                "There was a problem getting metadata from the image: ",
                e.message,
                "Please consider using acquisition metadata instead (click OK). ",
                "Or, quit the analysis run and investigate image metadata by hand. "
            ])
            params.setMetadataSource("Acquisition metadata")
    # deliberately a second 'if' (not elif) so the fallback above is
    # handled in the same call
    if params.metadata_source == "Acquisition metadata":
        od = OpenDialog('Choose acquisition metadata file...',
                        os.path.dirname(params.input_image_path),
                        '*.txt')
        file_path = od.getPath()
        if file_path is None:
            raise IOError('no metadata file chosen')
        acq_metadata_dict = import_iq3_metadata(file_path)
        # missing keys get neutral defaults (1.0 frame interval, 'frames')
        try:
            params.setFrameInterval(acq_metadata_dict['frame_interval'])
        except KeyError:
            params.setFrameInterval(1.0)
        try:
            params.setIntervalUnit(acq_metadata_dict['time_unit'])
        except KeyError:
            params.setIntervalUnit('frames')
        params.setPixelPhysicalSize(acq_metadata_dict['x_physical_size'])
        params.setPixelSizeUnit(acq_metadata_dict['x_unit'])
        params.setMetadataSourceFile(file_path)
    return params
def load_ome_img(file_name):
    """Open an image via Bio-Formats and return it with its OME metadata.

    :param file_name: path to the image file
    :return: tuple of (ImagePlus, OME metadata object)
    """
    # first ImagePlus of whatever the importer produces
    imag = BF.openImagePlus(file_name)[0]
    # parse metadata with a dedicated reader
    meta_reader = ImageReader()
    ome_metadata = MetadataTools.createOMEXMLMetadata()
    meta_reader.setMetadataStore(ome_metadata)
    meta_reader.setId(file_name)
    print(ome_metadata)
    meta_reader.close()
    return (imag, ome_metadata)
def select_input(self, event):
    """GUI callback: let the user pick an input file, read its pyramid
    structure and prepare output naming/paths.

    Side effects: sets self.input_path, self.file_core_name,
    self.num_of_piramids_list, self.max_res_indexes,
    self.possible_slices, self.binFactor_list, self.binStep_list and
    self.output_path; creates the output directory if needed.
    """
    # get the info about the number of images in the file
    self.input_path = IJ.getFilePath("Choose a File")
    # if default naming is not changed use file name
    if self.textfield2.text == self.default_naming:
        self.file_core_name = path.basename(self.input_path).split('.czi')[0]
    else:
        self.file_core_name = self.textfield2.text
    # put that name in the text field
    self.panel.getComponents()[1].setText(self.file_core_name)
    reader = ImageReader()
    reader.setId(self.input_path)
    metadata_list = reader.getCoreMetadataList()
    # slide scanner makes a piramid of X for every ROI you draw
    # resolution is not updated in the metadata so it needs to be calculated manually
    number_of_images, self.num_of_piramids_list = get_data_structure(metadata_list)
    print("Number of images is " + str(number_of_images))
    # get the indexes of the maximum resolution images
    self.max_res_indexes = get_maxres_indexes(self.num_of_piramids_list)
    print("Number of pyramids are " + str(self.num_of_piramids_list))
    # set names of subimages in the list, waiting to compare to current outputs
    self.possible_slices = [self.file_core_name + "_slice-" + str(n)
                            for n in range(number_of_images)]
    self.binFactor_list, self.binStep_list = get_binning_factor(self.max_res_indexes,
                                                                self.num_of_piramids_list,
                                                                metadata_list)
    print("Binning factors are " + str(self.binFactor_list))
    print("Binning steps are " + str(self.binStep_list))
    # create output directory if it doesn't exist
    # get the animal id (first underscore-separated token of the core name)
    animal_id = self.file_core_name.split('_')[0]
    self.output_path = path.join(path.dirname(path.dirname(self.input_path)),
                                 "Processed_data", animal_id, "ROIs")
    if path.isdir(self.output_path):
        print("Output path was already created")
    else:
        makedirs(self.output_path)
        print("Output path created")
    # update_lists depending on whether something has been processed already
    self.update_list()
def time_parser(): """ Iterates through timelapse, """ """ outputs timepoints with corresponding seriesnames. """ """ - S. Grødem 2017 """ # Get metadata. reader = ImageReader() omeMeta = MetadataTools.createOMEXMLMetadata() reader.setMetadataStore(omeMeta) reader.setId(str(Experiment)) # Extracts number of image series, channel number seriesCount = reader.getSeriesCount() reader.close() # Gets timepoints, in minutes. timelist = [] namelist = [] for timepoint in range (seriesCount): times = omeMeta.getImageAcquisitionDate(timepoint) timelist.append(times.toString()) namelist.append(omeMeta.getImageName(timepoint)) # YY.MM... to minutes. timelist =[ time.mktime(time.strptime(times, u'%Y-%m-%dT%H:%M:%S')) for times in timelist ] timelist_unsorted =[ (times - timelist[0])/60 for times in timelist ] # Sort timepoints. timelist, namelist = zip(*sorted(zip(timelist_unsorted, namelist))) timelist = [round(float(i), 3) for i in timelist] # Output to IJ log images = zip(timelist, namelist) IJ.log("Series number: " + str(seriesCount)) IJ.log("*"*15) for i in range(len(images)): IJ.log("Name: " + str(images[i][1])) IJ.log("Time: " + str(images[i][0])) IJ.log("-"*15)
def choose_series(filepath, params): """if input file contains more than one image series (xy position), prompt user to choose which one to use""" # todo: if necessary (e.g. if lots of series), can improve thumbnail visuals based loosely on https://github.com/ome/bio-formats-imagej/blob/master/src/main/java/loci/plugins/in/SeriesDialog.java import_opts = ImporterOptions(); import_opts.setId(filepath); reader = ImageReader(); ome_meta = MetadataTools.createOMEXMLMetadata(); reader.setMetadataStore(ome_meta); reader.setId(filepath); no_series = reader.getSeriesCount(); if no_series == 1: return import_opts, params; else: series_names = [ome_meta.getImageName(idx) for idx in range(no_series)]; dialog = GenericDialog("Select series to load..."); dialog.addMessage("There are multiple series in this file! \n" + "This is probably because there are multiple XY stage positions. \n " + "Please choose which series to load: "); thumbreader = BufferedImageReader(reader); cbg = CheckboxGroup(); for idx in range(no_series): p = Panel(); p.add(Box.createRigidArea(Dimension(thumbreader.getThumbSizeX(), thumbreader.getThumbSizeY()))); ThumbLoader.loadThumb(thumbreader, idx, p, True); dialog.addPanel(p); cb = Checkbox(series_names[idx], cbg, idx==0); p.add(cb); dialog.showDialog(); if dialog.wasCanceled(): raise KeyboardInterrupt("Run canceled"); if dialog.wasOKed(): selected_item = cbg.getSelectedCheckbox().getLabel(); selected_index = series_names.index(selected_item); params.setSelectedSeriesIndex(selected_index); for idx in range(0, no_series): import_opts.setSeriesOn(idx, True) if (idx==selected_index) else import_opts.setSeriesOn(idx, False); reader.close(); return import_opts, params
def processFile(filename, inDir, outDir, dichroics, mergeList):
    """Apply Olympus dichroic-mirror offset correction to one .oir file.

    Each channel is extracted, translated by the per-dichroic offset
    (rotated when the FOV/ROI rotation is significant) and saved as a
    TIFF; optionally all channels are merged into one file plus a
    max-projection.

    Parameters
    ----------
    filename : str       file name (no path)
    inDir, outDir : str  input/output directories (with trailing separator)
    dichroics : list     dichroic mirror name per channel ("DM1"..)
    mergeList : list or None
        per-colour channel indexes to merge; None disables merging
    """
    # merging is requested iff a merge list was supplied
    if mergeList is None:
        merge = False
    else:
        merge = True
    filenameExExt = os.path.splitext(filename)[0]
    filepath = inDir + filename
    # parse metadata
    reader = ImageReader()
    omeMeta = MetadataTools.createOMEXMLMetadata()
    reader.setMetadataStore(omeMeta)
    reader.setId(filepath)
    numChannels = reader.getSizeC()
    numSlices = reader.getSizeZ()
    numFrames = reader.getSizeT()
    seriesCount = reader.getSeriesCount()
    globalMetadata = reader.getGlobalMetadata()
    seriesMetadata = reader.getSeriesMetadata()
    # Olympus-specific metadata keys; a missing key raises — presumably
    # guaranteed present for .oir files (TODO confirm)
    objLensName = globalMetadata['- Objective Lens name #1']
    areaRotation = float(seriesMetadata['area rotation #1'])
    acquisitionValueRotation = float(seriesMetadata['acquisitionValue rotation #1'])
    if 'regionInfo rotation #1' in seriesMetadata:
        regionInfoRotation = float(seriesMetadata['regionInfo rotation #1'])
    else:
        regionInfoRotation = float(0)
    totalRotation = areaRotation + regionInfoRotation
    physSizeX = omeMeta.getPixelsPhysicalSizeX(0)
    physSizeY = omeMeta.getPixelsPhysicalSizeY(0)
    # pixel sizes in micrometres, used to convert offsets to pixels
    pxSizeX = physSizeX.value(UNITS.MICROM)
    pxSizeY = physSizeY.value(UNITS.MICROM)
    # log metadata
    IJ.log("\nMETADATA")
    #IJ.log("Filename: " + filepath)
    IJ.log("Number of series: " + str(seriesCount))
    IJ.log("Number of channels: " + str(numChannels))
    IJ.log("Number of frames: " + str(numFrames))
    IJ.log("Number of slices: " + str(numSlices))
    IJ.log("Objective lens: " + objLensName)
    IJ.log("FOV rotation: " + str(areaRotation))
    IJ.log("ROI rotation: " + str(regionInfoRotation))
    IJ.log("Total rotation: " + str(totalRotation))
    IJ.log("Pixel size:")
    IJ.log("\t\tX = " + str(physSizeX.value()) + " " + physSizeX.unit().getSymbol())
    IJ.log("\t\tY = " + str(physSizeY.value()) + " " + physSizeY.unit().getSymbol())
    if merge:
        # timestamped hidden temp folder for the per-channel TIFFs
        tifDir = outDir + "." + str(datetime.now()).replace(" ", "").replace(":", "") + "/"
        if not os.path.exists(tifDir):
            os.makedirs(tifDir)
            IJ.log("\nCreated temporary folder: " + tifDir + "\n")
        else:
            IJ.log("Unable to create temporary folder!\n")
    else:
        tifDir = outDir + filenameExExt + "/"
        if not os.path.exists(tifDir):
            os.makedirs(tifDir)
            IJ.log("\nCreated subfolder: " + tifDir + "\n")
        else:
            IJ.log("\nSubfolder " + tifDir + " already exists.\n")
    # correct images channel by channel
    tifFilePaths = []
    for i in range(numChannels):
        ip = extractChannel(oirFile=filepath, ch=i)
        if dichroics[i] == "DM1":
            # DM1 is the reference mirror: no offset to apply
            IJ.log("Channel " + str(i+1) + " was imaged using DM1, so no correction required.")
        else:
            offsets = getOffset(obj=objLensName,dm=dichroicDict[dichroics[i]])
            xom = offsets['x']
            yom = offsets['y']
            # rotate the micrometre offset into the image frame when the
            # total rotation is non-negligible
            if abs(totalRotation) > 0.1:
                rotOff = rotateOffset(x=xom, y=yom, angle=-totalRotation)
                xom = rotOff['x']
                yom = rotOff['y']
            # micrometres -> whole pixels
            xop = int(round(xom/pxSizeX))
            yop = int(round(yom/pxSizeY))
            IJ.log("Channel " + str(i+1) + " offsets")
            IJ.log("\t\tMicrometres")
            IJ.log("\t\t\t\tx = " + str(xom))
            IJ.log("\t\t\t\ty = " + str(yom))
            IJ.log("\t\tPixels")
            IJ.log("\t\t\t\tx = " + str(xop))
            IJ.log("\t\t\t\ty = " + str(yop))
            IJ.run(ip, "Translate...", "x=" + str(-xop) + " y=" + str(-yop) + " interpolation=None stack")
        tifFilePath = tifDir + filenameExExt + "_ch_"+str(i+1)+".tif"
        tifFilePaths.append(tifFilePath)
        # refuse to overwrite existing output — abort the whole file
        if os.path.exists(tifFilePath):
            IJ.log("\nOutput file exists: " + tifFilePath)
            IJ.log("Rerun plugin choosing a different output folder")
            IJ.log("or delete file and then rerun plugin.")
            IJ.log("Image processing terminated!\n")
            return
        FileSaver(ip).saveAsTiff(tifFilePath)
    if merge:
        max_list = []
        # replace channel indexes in mergeList with the loaded images,
        # collecting a max-projection of each
        for i in range(len(mergeList)):
            if mergeList[i] != None:
                mergeList[i] = readSingleChannelImg(tifFilePaths[mergeList[i]])
                channel = mergeList[i]#https://python.hotexamples.com/examples/ij.plugin/RGBStackMerge/mergeChannels/python-rgbstackmerge-mergechannels-method-examples.html
                projector = ZProjector(channel)
                projector.setMethod(ZProjector.MAX_METHOD)
                projector.doProjection()
                max_list.append(projector.getProjection())
        merged = RGBStackMerge.mergeChannels(mergeList, False)
        merged_max = RGBStackMerge.mergeChannels(max_list, False)
        mergedChannelFilepath = outDir + filenameExExt + ".tif"
        maxMergedChannelFilepath = outDir + filenameExExt + "_max.tif"
        # NOTE(review): unlike the per-channel case above, this logs the
        # clash but does NOT return before saving — confirm intended
        if os.path.exists(mergedChannelFilepath):
            IJ.log("\nOutput file exists: " + mergedChannelFilepath)
            IJ.log("Rerun plugin choosing a different output folder")
            IJ.log("or delete file and then rerun plugin.")
            IJ.log("Image processing terminated!\n")
        FileSaver(merged).saveAsTiff(mergedChannelFilepath)
        FileSaver(merged_max).saveAsTiff(maxMergedChannelFilepath)
        # clean up the temporary per-channel TIFFs
        for tf in tifFilePaths:
            os.remove(tf)
        os.rmdir(tifDir)
    IJ.log("\nFinished processing file:\n" + filepath + "\n")
    if merge:
        IJ.log("Image file with channels aligned:\n" + outDir + filenameExExt + ".tif\n")
    else:
        IJ.log("Aligned images (one tiff file for each channel) can be found in:\n" + tifDir + "\n")
def processDirectory():
    """Top-level interactive workflow for Olympus DM correction.

    Asks the user for an input folder of .oir files, the dichroic mirror
    used per channel, optional channel-merge colours and an output
    folder, then runs processFile() on every .oir file found.
    """
    # start logging
    IJ.log("\n______________________________\n\n\t\tOlympus DM correction\n\t\tVersion " + pluginVersion +"\n______________________________\n")
    # ask user for an input directory
    dc = DirectoryChooser("Choose folder containing Olympus (.oir) files")
    inputDir = dc.getDirectory()
    if inputDir is None:
        IJ.log("User canceled the dialog!\nImage processing canceled!\n")
        return
    IJ.log("\nInput directory: " + inputDir + "\n")
    oirFiles = []
    for f in os.listdir(inputDir):
        if f.endswith(".oir"):
            oirFiles.append(f)
    if len(oirFiles) < 1:
        IJ.log("Input directory does not contain any Olympus (.oir) files.\nNo images to process.\n")
        return
    # find out how many channels are in first file (we will assume all files have same number of channels and were acquired using same DMs)
    reader = ImageReader()
    omeMeta = MetadataTools.createOMEXMLMetadata()
    reader.setMetadataStore(omeMeta)
    reader.setId(inputDir + oirFiles[0])
    numChannels = reader.getSizeC()
    # ask user to identify dichroic mirror used for each channel
    gdDM = GenericDialog("Dichroic mirrors")
    DMs = ["DM1", "DM2", "DM3", "DM4", "DM5"]
    for i in range(numChannels):
        gdDM.addChoice("Channel " + str(i+1), DMs, DMs[0])
    gdDM.addCheckbox("Merge channels", False)
    gdDM.showDialog()
    if gdDM.wasCanceled():
        IJ.log("User canceled the dialog!\nImage processing canceled!\n")
        return
    dichroics = []
    for i in range(numChannels):
        dichroics.append(gdDM.getNextChoice())
    merge = gdDM.getNextBoolean()
    IJ.log("User selected dichroic mirrors")
    for i in range(numChannels):
        IJ.log("\t\tChannel " + str(i+1) + ": " + dichroics[i])
    IJ.log("\n")
    if merge:
        # build the channel-name -> index mapping plus a "NONE" option
        channels = []
        chDict = {}
        for i in range(numChannels):
            chName = "Channel"+str(i+1)
            channels.append(chName)
            chDict[chName] = i
        channels.append("NONE")
        colourChoices = ["red", "green", "blue", "gray", "cyan", "magenta", "yellow"]
        gdMerge = GenericDialog("Merge channels")
        for c in colourChoices:
            # default selection is "NONE" (index numChannels)
            gdMerge.addChoice(c + ":", channels, channels[numChannels])
        gdMerge.showDialog()
        if gdMerge.wasCanceled():
            IJ.log("User canceled the dialog!\nImage processing canceled!\n")
            return
        IJ.log("User selected channel colours")
        # usersMergeList[i] is the channel index assigned to colour i,
        # or None when the user picked "NONE"
        usersMergeList = []
        for i in range(len(colourChoices)):
            ch = gdMerge.getNextChoice()
            if ch == "NONE":
                usersMergeList.append(None)
            else:
                usersMergeList.append(chDict[ch])
            IJ.log("\t\t" + colourChoices[i] + ": " + ch)
        IJ.log("\n\n")
    # ask user for an output directory
    dc = DirectoryChooser("Choose folder for output")
    outputDir = dc.getDirectory()
    if outputDir is None:
        IJ.log("User canceled the dialog!\nImage processing canceled!\n")
        return
    counter = 0
    totalFiles = len(oirFiles)
    for o in oirFiles:
        counter +=1
        IJ.log("Processing file " + str(counter) + " of " + str(totalFiles) + "\n")
        IJ.log("File path: " + inputDir + o)
        if merge:
            # pass a copy: processFile mutates the merge list in place
            ml = usersMergeList[:]
        else:
            ml = None
        processFile(o, inputDir, outputDir, dichroics, ml)
        IJ.log("\n--------------------------\n")
def initreader(vsi_path):
    """Create and return an opened Bio-Formats reader for *vsi_path*
    with an OME-XML metadata store attached."""
    ome_store = MetadataTools.createOMEXMLMetadata()
    bf_reader = ImageReader()
    bf_reader.setMetadataStore(ome_store)
    bf_reader.setId(vsi_path)
    return bf_reader
def get_lif_series_length(fpath):
    """Return the number of image series contained in *fpath*.

    Parameters
    ----------
    fpath : str
        path to the (e.g. .lif) image file

    Returns
    -------
    int
        series count reported by Bio-Formats
    """
    reader = ImageReader()
    try:
        reader.setId(fpath)
        series_count = reader.getSeriesCount()
    finally:
        # the original never closed the reader — file-handle leak
        reader.close()
    return series_count
def readczi(imagefile,
            stitchtiles=True,
            setflatres=False,
            readpylevel=0,
            setconcat=True,
            openallseries=True,
            showomexml=False,
            attach=False,
            autoscale=True):
    """Read a Carl Zeiss Image (CZI) file with the ZeissCZIReader.

    Parameters
    ----------
    imagefile : str       path to the CZI file
    stitchtiles : bool    auto-stitch tiles (False returns raw tiles)
    setflatres : bool     flatten pyramid resolutions into series
    readpylevel : int     pyramid level to return (0 = full resolution)
    setconcat : bool      concatenate series into a single stack
    openallseries : bool  open every series
    showomexml : bool     display the OME-XML window
    attach : bool         also read attachment images
    autoscale : bool      autoscale display ranges

    Returns
    -------
    (ImagePlus, dict)
        the image at the requested pyramid level and a metadata dict
    """
    log.info('Filename : ' + imagefile)
    metainfo = {}
    # checking for the file Extension
    metainfo['Extension'] = MiscTools.getextension(MiscTools.splitext_recurse(imagefile))
    log.info('Detected File Extension : ' + metainfo['Extension'])
    # initialize the reader and get the OME metadata
    reader = ImageReader()
    omeMeta = MetadataTools.createOMEXMLMetadata()
    reader.setMetadataStore(omeMeta)
    reader.setId(imagefile)
    metainfo['SeriesCount_BF'] = reader.getSeriesCount()
    reader.close()
    # get the scaling for XYZ; each axis is guarded independently.
    # BUGFIX: ScaleY previously copied the X pixel size (physSizeX)
    physSizeX = omeMeta.getPixelsPhysicalSizeX(0)
    physSizeY = omeMeta.getPixelsPhysicalSizeY(0)
    physSizeZ = omeMeta.getPixelsPhysicalSizeZ(0)
    metainfo['ScaleX'] = round(physSizeX.value(), 3) if physSizeX is not None else None
    metainfo['ScaleY'] = round(physSizeY.value(), 3) if physSizeY is not None else None
    metainfo['ScaleZ'] = round(physSizeZ.value(), 3) if physSizeZ is not None else None
    # configure the dedicated CZI reader
    options = DynamicMetadataOptions()
    options.setBoolean("zeissczi.autostitch", stitchtiles)
    options.setBoolean("zeissczi.attachments", attach)
    czireader = ZeissCZIReader()
    czireader.setFlattenedResolutions(setflatres)
    czireader.setMetadataOptions(options)
    czireader.setId(imagefile)
    # Set the preferences in the ImageJ plugin
    # Note although these preferences are applied, they are not refreshed in the UI
    Prefs.set("bioformats.zeissczi.allow.autostitch", str(stitchtiles).lower())
    Prefs.set("bioformats.zeissczi.include.attachments", str(attach).lower())
    metainfo['rescount'] = czireader.getResolutionCount()
    metainfo['SeriesCount_CZI'] = czireader.getSeriesCount()
    # Dimensions
    metainfo['SizeT'] = czireader.getSizeT()
    metainfo['SizeZ'] = czireader.getSizeZ()
    metainfo['SizeC'] = czireader.getSizeC()
    metainfo['SizeX'] = czireader.getSizeX()
    metainfo['SizeY'] = czireader.getSizeY()
    # check for autostitching and possibility to read attachment
    metainfo['AllowAutoStitching'] = czireader.allowAutostitching()
    metainfo['CanReadAttachments'] = czireader.canReadAttachments()
    # read in and display ImagePlus(es) with arguments
    options = ImporterOptions()
    options.setOpenAllSeries(openallseries)
    options.setShowOMEXML(showomexml)
    options.setConcatenate(setconcat)
    options.setAutoscale(autoscale)
    options.setId(imagefile)
    # open the ImgPlus
    imps = BF.openImagePlus(options)
    metainfo['Pyramid Level Output'] = readpylevel + 1
    try:
        imp = imps[readpylevel]
        pylevelout = metainfo['SeriesCount_CZI']
    except IndexError:
        # fallback option: requested pyramid level does not exist
        log.info('PyLevel=' + str(readpylevel) + ' does not exist.')
        log.info('Using Pyramid Level = 0 as fallback.')
        imp = imps[0]
        pylevelout = 0
        metainfo['Pyramid Level Output'] = pylevelout
    # get the stack and some info
    imgstack = imp.getImageStack()
    metainfo['Output Slices'] = imgstack.getSize()
    metainfo['Output SizeX'] = imgstack.getWidth()
    metainfo['Output SizeY'] = imgstack.getHeight()
    # calc scaling in case of pyramid
    scale = float(metainfo['SizeX']) / float(metainfo['Output SizeX'])
    metainfo['Pyramid Scale Factor'] = scale
    metainfo['ScaleX Output'] = metainfo['ScaleX'] * scale
    metainfo['ScaleY Output'] = metainfo['ScaleY'] * scale
    # set the correct scaling
    # BUGFIX: scaleY previously reused metainfo['ScaleX Output']
    imp = MiscTools.setscale(imp,
                             scaleX=metainfo['ScaleX Output'],
                             scaleY=metainfo['ScaleY Output'],
                             scaleZ=metainfo['ScaleZ'],
                             unit="micron")
    # close czireader
    czireader.close()
    return imp, metainfo
def process(filename):
    """Generate a methods-section text blurb describing how *filename*
    was acquired, from its Bio-Formats/OME metadata.

    Parameters
    ----------
    filename : java.io.File
        image file to describe (getAbsolutePath() is used)

    Returns
    -------
    str
        the assembled blurb text
    """
    TEMPLATE_GENERAL = "The data was acquired on a {ID} microscope, using a {objective} {NA} NA objective. The pixel size was {pxx_microns} microns. "
    TEMPLATE_CHANNEL = "The excitation and emission wavelengths for channel {ch} were {ex} and {em} and the {exposureTime} was {et}. "
    TEMPLATE_3D = "A series of slices was collected with a step size of {pzz_microns} microns. "
    TEMPLATE_TIME = "Images were acquired with a time interval of {timeInterval}. "
    BLURB = ""
    # Admin stuff
    import sys
    from org.scijava.ui.swing.console import LoggingPanel
    logger.addLogListener( LoggingPanel(context) );
    logger.info(filename.getAbsolutePath())
    # Get a BioFormats reader
    from loci.formats import ImageReader
    ir = ImageReader()
    # Adapted from https://github.com/ome/bioformats/blob/develop/components/formats-gpl/utils/GetPhysicalMetadata.java
    m = omeservice.createOMEXMLMetadata()
    ir.setMetadataStore(m)
    ir.setId(filename.getAbsolutePath())
    # Some checks
    ninstruments = m.getInstrumentCount()
    if ninstruments > 1:
        logger.error("More than one instrument found. Automatic generation will not work...")
    if ninstruments == 0:
        logger.error("No instrument metadata found! Automatic generation will not work...")
    # Manufacturer and modalities
    try:
        ID = m.getMicroscopeManufacturer(0)
    except:
        logger.error(sys.exc_info()[0])
        ID = None
    # Fall back to guessing the vendor from the file-format name.
    # NOTE(review): ff is only defined in this branch but is read again
    # further down ("Nikon" in ff, "CZI" in ff) — a NameError is possible
    # when the manufacturer WAS present in the metadata; confirm.
    if ID == None:
        ff = str(ir.getFormat())
        if "Zeiss" in ff:
            ID="Zeiss"
        elif "Nikon" in ff:
            ID="Nikon"
            tID = ir.getMetadataValue("m_sMicroscopePhysFullName")
            if tID is not None:
                ID = tID
        elif "Olympus" in ff:
            ID="Olympus"
        else:
            ID=""
    # Acquisition modality: warn if channels disagree, keep the last value
    for ic in range(ir.getSizeC()):
        mode = m.getChannelAcquisitionMode(0,ic)
        if ic>0 and mode != mode0:
            logger.warn("WARNING : Not all channels were acquired with the same modality..")
        else:
            mode0=mode
    # Turn CamelCase modality names into spaced lower-case words
    # (TIRF is kept verbatim)
    if mode == None:
        mode_with_spaces = "UNKNOWN"
    else:
        mode_with_spaces = ""
        if str(mode) == "TIRF":
            mode_with_spaces = str(mode)
        else:
            for letter in str(mode):
                if letter.isupper():
                    mode_with_spaces += " "+letter.lower()
                else:
                    mode_with_spaces += letter
    ID+=" "+str(mode_with_spaces.strip())
    if ninstruments == 1:
        nobjectives = m.getObjectiveCount(0)
        if nobjectives > 1:
            logger.error("More than one objective found. Automatic generation will generate information for the first objective only.")
    # Objective magnification
    objective = "UNKNOWN"
    if ninstruments == 1 and nobjectives >0:
        try:
            magnification1 = m.getObjectiveNominalMagnification(0,0)
            if magnification1 != None:
                objective = "{:.0f}x".format(magnification1)
        except:
            logger.error(sys.exc_info()[0])
            msg = "Could not extract information about the objective! The image might be missing some crucial metadata."
            logger.error(msg)
    # Nikon-specific fallback for the objective name
    if objective == "UNKNOWN":
        if "Nikon" in ff:
            objective0 = str(ir.getMetadataValue("sObjective"))
            if objective0 is not None:
                objective = objective0
    # Numerical aperture
    NA = "UNKNOWN"
    if ninstruments == 1 and nobjectives >0:
        try:
            NA1 = m.getObjectiveLensNA(0,0)
            if NA1 != None:
                NA = str(NA1)
        except:
            msg = "Could not extract information about the objective! The image might be missing some crucial metadata."
            logger.error(msg)
    # Nikon-specific fallback for NA
    NAm = ir.getMetadataValue("Numerical Aperture")
    if NA=="UNKNOWN" and "Nikon" in ff and NAm is not None:
        NA = str(NAm)
    #else:
    #    HT=ir.getGlobalMetadata()
    #    for k in HT.keys():
    #        print "{}={}".format(k,HT.get(k))
    # Pixel size
    nimages = m.getImageCount()
    logger.info("Found {} images".format(nimages))
    from ome.units import UNITS
    pxx_microns = "UNKNOWN"
    if ninstruments==1 and nobjectives>0:
        try:
            pxx_microns = "{:.2f}".format(m.getPixelsPhysicalSizeX(0).value(UNITS.MICROMETER))
        except:
            logger.error(sys.exc_info()[0])
            msg = "Could not extract physical pixel size! The image might be missing some crucial metadata."
            logger.error(msg)
    # Is it 3D?
    is3D = ir.getSizeZ()>1
    pzz_microns = "UNKNOWN"
    if ninstruments==1 and nobjectives>0:
        try:
            pzz_microns = "{:.2f}".format(m.getPixelsPhysicalSizeZ(0).value(UNITS.MICROMETER))
        except:
            logger.error(sys.exc_info()[0])
            msg = "This image is 3D but I could not extract physical step size! The image might be missing some crucial metadata."
            logger.error(msg)
    # TODO Is it a time series?
    # GENERAL BLURB GENERATION
    BLURB += TEMPLATE_GENERAL.format(ID=ID, objective=objective, NA=NA, pxx_microns=pxx_microns)
    if is3D:
        BLURB += TEMPLATE_3D.format(pzz_microns=pzz_microns)
    # Extract channel information (wavelengths and exposure time)
    for ic in range(ir.getSizeC()):
        try:
            ex0 = m.getChannelExcitationWavelength(0,ic)
            if ex0==None:
                ex = "UNKNOWN"
            else:
                ex="{:.0f} nm".format(ex0.value(UNITS.NANOMETER))
        except:
            logger.error(sys.exc_info()[0])
            logger.error("Wasn't able to extract channel wavelength information for channel {}.".format(ic+1))
            continue
        try:
            em0 = m.getChannelEmissionWavelength(0,ic)
            if em0==None:
                em = "UNKNOWN"
            else:
                em="{:.0f} nm".format(em0.value(UNITS.NANOMETER))
        except:
            logger.error(sys.exc_info()[0])
            logger.error("Wasn't able to extract channel wavelength information for channel {}.".format(ic+1))
            continue
        #try:
        ix = ir.getIndex(0, ic, 0) # NOTE : First z plane, first timepoint only
        et = m.getPlaneExposureTime(0,ix)
        if et==None:
            et = "UNKNOWN"
        else:
            etms = et.value(UNITS.MILLISECOND)
            if "CZI" in ff:
                # TODO Check if error is across other images
                logger.warn("The exposure time was divided by 1000 to account for ms mistaken as s in CZI files")
                etms = etms/1000
            if etms<1000:
                et=str("{:.2f} ms".format(etms))
            else:
                et=str("{} s".format(etms/1000))
                if etms/1000>600:
                    logger.warn("Exposure time for channel {} is {}s. That's longer than 10m, please double check metadata to make sure it's correct".format(ic+1,etms/1000))
        BLURB += TEMPLATE_CHANNEL.format(ch=ic+1, ex=ex, exposureTime="exposure time", et=et, em=em)
        #except:
        #    logger.error("Wasn't able to extract channel {} exposure time information.".format(ic+1))
    return BLURB
def main():
    """Estimate BaSiC flat-field and dark-field correction profiles for
    every channel of the input dataset and save them as multi-channel
    TIFFs ("<experiment>-ffp.tif" / "<experiment>-dfp.tif").

    Relies on script-level globals: lambda_flat, lambda_dark, filename,
    output_dir, experiment_name.
    """
    Interpreter.batchMode = True
    # lambda_flat and lambda_dark must be both zero (automatic
    # regularisation) or both non-zero (manual)
    if (lambda_flat == 0) ^ (lambda_dark == 0):
        print ("ERROR: Both of lambda_flat and lambda_dark must be zero,"
               " or both non-zero.")
        return
    lambda_estimate = "Automatic" if lambda_flat == 0 else "Manual"
    print "Loading images..."
    # Use BioFormats reader directly to determine dataset dimensions without
    # reading every single image. The series count (num_images) is the one value
    # we can't easily get any other way, but we might as well grab the others
    # while we have the reader available.
    bfreader = ImageReader()
    bfreader.id = str(filename)
    num_images = bfreader.seriesCount
    num_channels = bfreader.sizeC
    width = bfreader.sizeX
    height = bfreader.sizeY
    bfreader.close()
    # The internal initialization of the BaSiC code fails when we invoke it via
    # scripting, unless we explicitly set the private 'noOfSlices' field.
    # Since it's private, we need to use Java reflection to access it.
    Basic_noOfSlices = Basic.getDeclaredField('noOfSlices')
    Basic_noOfSlices.setAccessible(True)
    basic = Basic()
    Basic_noOfSlices.setInt(basic, num_images)
    # Pre-allocate the output profile images, since we have all the dimensions.
    ff_image = IJ.createImage("Flat-field", width, height, num_channels, 32);
    df_image = IJ.createImage("Dark-field", width, height, num_channels, 32);
    print("\n\n")
    # BaSiC works on one channel at a time, so we only read the images from one
    # channel at a time to limit memory usage.
    for channel in range(num_channels):
        print "Processing channel %d/%d..." % (channel + 1, num_channels)
        print "==========================="
        options = ImporterOptions()
        options.id = str(filename)
        options.setOpenAllSeries(True)
        # concatenate=True gives us a single stack rather than a list of
        # separate images.
        options.setConcatenate(True)
        # Limit the reader to the channel we're currently working on. This loop
        # is mainly why we need to know num_images before opening anything.
        for i in range(num_images):
            options.setCBegin(i, channel)
            options.setCEnd(i, channel)
        # openImagePlus returns a list of images, but we expect just one (a
        # stack).
        input_image = BF.openImagePlus(options)[0]
        # BaSiC seems to require the input image is actually the ImageJ
        # "current" image, otherwise it prints an error and aborts.
        WindowManager.setTempCurrentImage(input_image)
        basic.exec(
            input_image, None, None,
            "Estimate shading profiles", "Estimate both flat-field and dark-field",
            lambda_estimate, lambda_flat, lambda_dark,
            "Ignore", "Compute shading only"
        )
        input_image.close()
        # Copy the pixels from the BaSiC-generated profile images to the
        # corresponding channel of our output images.
        ff_channel = WindowManager.getImage("Flat-field:%s" % input_image.title)
        ff_image.slice = channel + 1
        ff_image.getProcessor().insert(ff_channel.getProcessor(), 0, 0)
        ff_channel.close()
        df_channel = WindowManager.getImage("Dark-field:%s" % input_image.title)
        df_image.slice = channel + 1
        df_image.getProcessor().insert(df_channel.getProcessor(), 0, 0)
        df_channel.close()
        print("\n\n")
    # '%%s' survives the first substitution as a literal '%s' placeholder
    template = '%s/%s-%%s.tif' % (output_dir, experiment_name)
    ff_filename = template % 'ffp'
    IJ.saveAsTiff(ff_image, ff_filename)
    ff_image.close()
    df_filename = template % 'dfp'
    IJ.saveAsTiff(df_image, df_filename)
    df_image.close()
    print "Done!"
def openfile(imagefile,
             stitchtiles=True,
             setflatres=False,
             readpylevel=0,
             setconcat=True,
             openallseries=True,
             showomexml=False,
             attach=False,
             autoscale=True,
             imageID=0):
    """Open an image file and collect its OME metadata.

    Dispatches to a CZI-specific reader for ``.czi`` files, a dedicated JPG
    method for ``.jpg``/``.jpeg`` files, and plain Bio-Formats otherwise.

    Parameters
    ----------
    imagefile : str
        Full path to the image file.
    stitchtiles : bool
        CZIReader option: return auto-stitched images (True) rather than the
        raw tiles as individual series (False).
    setflatres : bool
        Bio-Formats "flattened resolutions" option.
    readpylevel : int
        Pyramid level to read (0 = full resolution).
    setconcat, openallseries, showomexml, attach, autoscale : bool
        Importer options passed through to the reader helpers.
    imageID : int
        Index of the OME image (series) to read the dimensions from.

    Returns
    -------
    tuple(ImagePlus, dict)
        The opened image and a dict of metadata (sizes, scaling, counts).
    """
    metainfo = {}

    # checking for the file extension
    metainfo['Extension'] = MiscTools.getextension(MiscTools.splitext_recurse(imagefile))

    # initialize the reader and get the OME metadata
    reader = ImageReader()
    omeMeta = MetadataTools.createOMEXMLMetadata()
    reader.setMetadataStore(omeMeta)
    reader.setId(imagefile)
    # BUGFIX: getImageCount() must be queried AFTER setId() — before
    # initialization the freshly created OME store is empty and always
    # reported 0 images.
    metainfo['ImageCount_OME'] = omeMeta.getImageCount()
    metainfo['SeriesCount_BF'] = reader.getSeriesCount()
    reader.close()

    # read dimensions TZCXY from OME metadata
    metainfo['SizeT'] = omeMeta.getPixelsSizeT(imageID).getValue()
    metainfo['SizeZ'] = omeMeta.getPixelsSizeZ(imageID).getValue()
    metainfo['SizeC'] = omeMeta.getPixelsSizeC(imageID).getValue()
    metainfo['SizeX'] = omeMeta.getPixelsSizeX(imageID).getValue()
    metainfo['SizeY'] = omeMeta.getPixelsSizeY(imageID).getValue()

    # store info about stack
    if metainfo['SizeZ'] == 1:
        metainfo['is3d'] = False
    elif metainfo['SizeZ'] > 1:
        metainfo['is3d'] = True

    # get the scaling for XYZ (values may be None when not calibrated)
    physSizeX = omeMeta.getPixelsPhysicalSizeX(0)
    physSizeY = omeMeta.getPixelsPhysicalSizeY(0)
    physSizeZ = omeMeta.getPixelsPhysicalSizeZ(0)

    if physSizeX is not None:
        metainfo['ScaleX'] = round(physSizeX.value(), 3)
        # BUGFIX: ScaleY was previously copied from physSizeX. Use the real
        # Y calibration when present; fall back to X (square-pixel
        # assumption, the old behavior) when it is missing.
        if physSizeY is not None:
            metainfo['ScaleY'] = round(physSizeY.value(), 3)
        else:
            metainfo['ScaleY'] = round(physSizeX.value(), 3)
    if physSizeX is None:
        metainfo['ScaleX'] = None
        metainfo['ScaleY'] = None

    if physSizeZ is not None:
        metainfo['ScaleZ'] = round(physSizeZ.value(), 3)
    if physSizeZ is None:
        metainfo['ScaleZ'] = None

    # if image file is Carl Zeiss Image - CZI
    if metainfo['Extension'] == '.czi':
        # read the CZI file using the CZIReader
        # pylevel = 0 - read the full resolution image
        imp, metainfo = ImportTools.readCZI(imagefile, metainfo,
                                            stitchtiles=stitchtiles,
                                            setflatres=setflatres,
                                            readpylevel=readpylevel,
                                            setconcat=setconcat,
                                            openallseries=openallseries,
                                            showomexml=showomexml,
                                            attach=attach,
                                            autoscale=autoscale)

    # if image file is not Carl Zeiss Image - CZI
    if metainfo['Extension'] != '.czi':
        # read the imagefile using the correct method
        # BUGFIX: the original test was `== ('.jpg' or '.jpeg')`, which
        # evaluates to `== '.jpg'` and silently never matched '.jpeg'.
        if metainfo['Extension'].lower() in ('.jpg', '.jpeg'):
            # use dedicated method for jpg
            imp, metainfo = ImageTools.openjpg(imagefile, method='IJ')
        else:
            # if not jpg - use BioFormats
            imp, metainfo = ImportTools.readbf(imagefile, metainfo,
                                               setflatres=setflatres,
                                               readpylevel=readpylevel,
                                               setconcat=setconcat,
                                               openallseries=openallseries,
                                               showomexml=showomexml,
                                               autoscale=autoscale)

    return imp, metainfo
def processFile():
    """Correct dichroic-mirror offsets in an Olympus .oir multichannel image.

    Interactively asks the user for an input .oir file, the dichroic mirror
    used per channel, optional channel-merge colours and an output folder;
    translates each channel by the (rotation-corrected) mirror offset and
    saves per-channel TIFFs, optionally merged into one RGB stack.

    Side effects: GUI dialogs, ImageJ log output, files/folders created under
    the chosen output directory. Returns None in all cases.

    NOTE(review): relies on module-level names (``pluginVersion``,
    ``dichroicDict``, ``extractChannel``, ``getOffset``, ``rotateOffset``,
    ``readSingleChannelImg``) defined elsewhere in this file.
    """
    # start logging
    IJ.log("\n______________________________\n\n\t\tOlympus DM correction\n\t\tVersion " + pluginVersion +"\n______________________________\n")

    # ask user for file
    ofd = OpenDialog("Choose a file", None)
    filename = ofd.getFileName()
    if filename is None:
        IJ.log("User canceled the dialog!\nImage processing canceled!\n")
        return
    directory = ofd.getDirectory()
    filepath = directory + filename
    IJ.log("File path: " + filepath)

    if not filename.endswith(".oir"):
        IJ.log("Not an Olympus (.oir) file.\nNo image to process.\n")
        return

    filenameExExt = os.path.splitext(filename)[0]

    # parse metadata
    reader = ImageReader()
    omeMeta = MetadataTools.createOMEXMLMetadata()
    reader.setMetadataStore(omeMeta)
    reader.setId(filepath)
    numChannels = reader.getSizeC()
    numSlices = reader.getSizeZ()
    numFrames = reader.getSizeT()
    seriesCount = reader.getSeriesCount()
    globalMetadata = reader.getGlobalMetadata()
    seriesMetadata = reader.getSeriesMetadata()
    # FIX: close the reader once the metadata is extracted (was leaked).
    reader.close()

    objLensName = globalMetadata['- Objective Lens name #1']
    areaRotation = float(seriesMetadata['area rotation #1'])
    acquisitionValueRotation = float(seriesMetadata['acquisitionValue rotation #1'])
    if 'regionInfo rotation #1' in seriesMetadata:
        regionInfoRotation = float(seriesMetadata['regionInfo rotation #1'])
    else:
        regionInfoRotation = float(0)
    # total rotation = FOV rotation + optional ROI rotation
    totalRotation = areaRotation + regionInfoRotation

    physSizeX = omeMeta.getPixelsPhysicalSizeX(0)
    physSizeY = omeMeta.getPixelsPhysicalSizeY(0)
    # pixel sizes in micrometres, used to convert offsets from um to px
    pxSizeX = physSizeX.value(UNITS.MICROM)
    pxSizeY = physSizeY.value(UNITS.MICROM)

    # log metadata
    IJ.log("\nMETADATA")
    #IJ.log("Filename: " + filepath)
    IJ.log("Number of series: " + str(seriesCount))
    IJ.log("Number of channels: " + str(numChannels))
    IJ.log("Number of frames: " + str(numFrames))
    IJ.log("Number of slices: " + str(numSlices))
    IJ.log("Objective lens: " + objLensName)
    IJ.log("FOV rotation: " + str(areaRotation))
    IJ.log("ROI rotation: " + str(regionInfoRotation))
    IJ.log("Total rotation: " + str(totalRotation))
    IJ.log("Pixel size:")
    IJ.log("\t\tX = " + str(physSizeX.value()) + " " + physSizeX.unit().getSymbol())
    IJ.log("\t\tY = " + str(physSizeY.value()) + " " + physSizeY.unit().getSymbol())

    # ask user to identify dichroic mirror used for each channel
    gdDM = GenericDialog("Dichroic mirrors")
    DMs = ["DM1", "DM2", "DM3", "DM4", "DM5"]
    for i in range(numChannels):
        gdDM.addChoice("Channel " + str(i+1), DMs, DMs[0])
    gdDM.addCheckbox("Merge channels", False)
    gdDM.showDialog()
    if gdDM.wasCanceled():
        IJ.log("User canceled the dialog!\nImage processing canceled!\n")
        return
    dichroics = []
    for i in range(numChannels):
        dichroics.append(gdDM.getNextChoice())
    merge = gdDM.getNextBoolean()
    IJ.log("\nUser selected dichroic mirrors")
    for i in range(numChannels):
        IJ.log("\t\tChannel " + str(i+1) + ": " + dichroics[i])

    if merge:
        # ask user which channel goes into which merge colour
        channels = []
        chDict = {}
        for i in range(numChannels):
            chName = "Channel"+str(i+1)
            channels.append(chName)
            chDict[chName] = i
        channels.append("NONE")
        colourChoices = ["red", "green", "blue", "gray", "cyan", "magenta", "yellow"]
        gdMerge = GenericDialog("Merge channels")
        for c in colourChoices:
            gdMerge.addChoice(c + ":", channels, channels[numChannels])
        gdMerge.showDialog()
        if gdMerge.wasCanceled():
            IJ.log("User canceled the dialog!\nImage processing canceled!\n")
            return
        IJ.log("\nUser selected channel colours")
        mergeList = []
        for i in range(len(colourChoices)):
            ch = gdMerge.getNextChoice()
            if ch == "NONE":
                mergeList.append(None)
            else:
                mergeList.append(chDict[ch])
                IJ.log("\t\t" + colourChoices[i] + ": " + ch)

    # ask user for an output directory
    dc = DirectoryChooser("Choose folder for output")
    od = dc.getDirectory()
    if od is None:
        IJ.log("User canceled the dialog!\nImage processing canceled!\n")
        return

    if merge:
        # hidden, timestamped temp folder for the per-channel TIFFs that
        # will be deleted again after merging
        tifDir = od + "." + str(datetime.now()).replace(" ", "").replace(":", "") + "/"
        if not os.path.exists(tifDir):
            os.makedirs(tifDir)
            IJ.log("\nCreated temporary folder: " + tifDir + "\n")
        else:
            IJ.log("Unable to create temporary folder!\n")
    else:
        tifDir = od + filenameExExt + "/"
        if not os.path.exists(tifDir):
            os.makedirs(tifDir)
            IJ.log("\nCreated subfolder: " + tifDir + "\n")
        else:
            IJ.log("\nSubfolder " + tifDir + " already exists")

    # correct images
    tifFilePaths = []
    for i in range(numChannels):
        ip = extractChannel(oirFile=filepath, ch=i)
        if dichroics[i] == "DM1":
            # DM1 is the reference mirror: no offset to correct
            IJ.log("Channel " + str(i+1) + " was imaged using DM1, so no correction required.")
        else:
            offsets = getOffset(obj=objLensName,dm=dichroicDict[dichroics[i]])
            xom = offsets['x']
            yom = offsets['y']
            # rotate the offset vector into the image frame when the FOV/ROI
            # was rotated by more than 0.1 degrees
            if abs(totalRotation) > 0.1:
                rotOff = rotateOffset(x=xom, y=yom, angle=-totalRotation)
                xom = rotOff['x']
                yom = rotOff['y']
            # convert micrometre offsets to integer pixel offsets
            xop = int(round(xom/pxSizeX))
            yop = int(round(yom/pxSizeY))
            IJ.log("Channel " + str(i+1) + " offsets")
            IJ.log("\t\tMicrometres")
            IJ.log("\t\t\t\tx = " + str(xom))
            IJ.log("\t\t\t\ty = " + str(yom))
            IJ.log("\t\tPixels")
            IJ.log("\t\t\t\tx = " + str(xop))
            IJ.log("\t\t\t\ty = " + str(yop))
            IJ.run(ip, "Translate...", "x=" + str(-xop) + " y=" + str(-yop) + " interpolation=None stack")

        tifFilePath = tifDir + filenameExExt + "_ch_"+str(i+1)+".tif"
        tifFilePaths.append(tifFilePath)
        if os.path.exists(tifFilePath):
            IJ.log("\nOutput file exists: " + tifFilePath)
            IJ.log("Rerun plugin choosing a different output folder")
            IJ.log("or delete file and then rerun plugin.")
            IJ.log("Image processing terminated!\n")
            return
        FileSaver(ip).saveAsTiff(tifFilePath)

    if merge:
        for i in range(len(mergeList)):
            if mergeList[i] is not None:
                mergeList[i] = readSingleChannelImg(tifFilePaths[mergeList[i]])
        merged = RGBStackMerge.mergeChannels(mergeList, False)
        mergedChannelFilepath = od + filenameExExt + ".tif"
        if os.path.exists(mergedChannelFilepath):
            IJ.log("\nOutput file exists: " + mergedChannelFilepath)
            IJ.log("Rerun plugin choosing a different output folder")
            IJ.log("or delete file and then rerun plugin.")
            IJ.log("Image processing terminated!\n")
            # BUGFIX: abort before overwriting the existing file — the
            # per-channel branch above returns here, this one did not, so the
            # function logged "terminated" and then saved over the file anyway.
            return
        FileSaver(merged).saveAsTiff(mergedChannelFilepath)
        # clean up the temporary per-channel TIFFs
        for tf in tifFilePaths:
            os.remove(tf)
        os.rmdir(tifDir)

    IJ.log("\nFinished processing file:\n" + filepath + "\n")
    if merge:
        IJ.log("Image file with channels aligned:\n" + od + filenameExExt + ".tif\n")
    else:
        IJ.log("Aligned images (one tiff file for each channel) can be found in:\n" + tifDir + "\n")
def process_time_points(root, files, outdir):
    '''Concatenate images and write ome.tiff file. If image contains already
    multiple time points just copy the image.

    Parameters:
        root   -- directory containing the input files
        files  -- list of single-time-point image paths to concatenate
        outdir -- output directory; when None the result is written to root

    NOTE(review): depends on module-level names defined elsewhere in this
    file: ``pattern`` (list of filename regexes with one capture group),
    ``setUpXml``, ``getTimePoint`` and ``concatenateImagePlus``.
    '''
    concat = 1
    files.sort()
    # open the first file virtually just to inspect its dimensions
    options = ImporterOptions()
    options.setId(files[0])
    options.setVirtual(1)
    image = BF.openImagePlus(options)
    image = image[0]
    if image.getNFrames() > 1:
        IJ.log(files[0] + " Contains multiple time points. Can only concatenate single time points! Don't do anything!")
        image.close()
        return

    width = image.getWidth()
    height = image.getHeight()
    # find the first filename pattern that matches; its capture group becomes
    # the output basename
    for patt in pattern:
        outName = re.match(patt, os.path.basename(files[0]))
        if outName is None:
            continue
        if outdir is None:
            outfile = os.path.join(root, outName.group(1) + '.ome.tif')
        else:
            outfile = os.path.join(outdir, outName.group(1) + '.ome.tif')
        # build the output OME metadata skeleton from the first file
        reader = ImageReader()
        reader.setMetadataStore(MetadataTools.createOMEXMLMetadata())
        reader.setId(files[0])
        timeInfo = []
        omeOut = reader.getMetadataStore()
        omeOut = setUpXml(omeOut, image, files)
        reader.close()
        image.close()
        IJ.log ('Concatenates ' + os.path.join(root, outName.group(1) + '.ome.tif'))
        itime = 0
        try:
            # copy per-plane metadata from every input file into the output
            # store; each input file contributes one time point
            for ifile, fileName in enumerate(files):
                print fileName
                omeMeta = MetadataTools.createOMEXMLMetadata()
                reader.setMetadataStore(omeMeta)
                reader.setId(fileName)
                #print omeMeta.getPlaneDeltaT(0,0)
                #print omeMeta.getPixelsTimeIncrement(0)
                if fileName.endswith('.czi'):
                    # CZI: delta-T relative to the first file's first plane
                    if ifile == 0:
                        T0 = omeMeta.getPlaneDeltaT(0,0).value()
                    dT = omeMeta.getPlaneDeltaT(0,0).value() - T0
                    unit = omeMeta.getPlaneDeltaT(0,0).unit()
                else:
                    # other formats: derive delta-T from acquisition time stamps
                    timeInfo.append(getTimePoint(reader, omeMeta))
                    unit = omeMeta.getPixelsTimeIncrement(0).unit()
                    try:
                        dT = round(timeInfo[files.index(fileName)]-timeInfo[0],2)
                    except:
                        # timeInfo entries may be datetimes; use the timedelta
                        dT = (timeInfo[files.index(fileName)]-timeInfo[0]).seconds

                nrImages = reader.getImageCount()

                # copy plane positions/indices into slot (i + itime*nrImages)
                # of the concatenated output
                for i in range(0, reader.getImageCount()):
                    try:
                        omeOut.setPlaneDeltaT(dT, 0, i + itime*nrImages)
                    except TypeError:
                        # newer Bio-Formats requires an ome.units Time object
                        omeOut.setPlaneDeltaT(Time(dT, unit),0, i + itime*nrImages)
                    omeOut.setPlanePositionX(omeOut.getPlanePositionX(0,i), 0, i + itime*nrImages)
                    omeOut.setPlanePositionY(omeOut.getPlanePositionY(0,i), 0, i + itime*nrImages)
                    omeOut.setPlanePositionZ(omeOut.getPlanePositionZ(0,i), 0, i + itime*nrImages)
                    omeOut.setPlaneTheC(omeOut.getPlaneTheC(0,i), 0, i + itime*nrImages)
                    omeOut.setPlaneTheT(NonNegativeInteger(itime), 0, i + itime*nrImages)
                    omeOut.setPlaneTheZ(omeOut.getPlaneTheZ(0,i), 0, i + itime*nrImages)
                itime = itime + 1
                reader.close()

                IJ.showProgress(files.index(fileName), len(files))

            # average time increment over the whole series
            try:
                incr = float(dT/(len(files)-1))
            except:
                incr = 0

            try:
                omeOut.setPixelsTimeIncrement(incr, 0)
            except TypeError:
                #new Bioformats >5.1.x
                omeOut.setPixelsTimeIncrement(Time(incr, unit),0)

            # write the pixel data, then overwrite the TIFF comment with the
            # assembled OME-XML
            outfile = concatenateImagePlus(files, outfile)
            if outfile is not None:
                filein = RandomAccessInputStream(outfile)
                fileout = RandomAccessOutputStream(outfile)
                saver = TiffSaver(fileout, outfile)
                saver.overwriteComment(filein,omeOut.dumpXML())
                fileout.close()
                filein.close()
        except:
            traceback.print_exc()
        finally:
            #close all possible open files
            try:
                reader.close()
            except:
                pass
            try:
                filein.close()
            except:
                pass
            try:
                fileout.close()
            except:
                # NOTE(review): body truncated in the source chunk;
                # reconstructed as pass to match the two handlers above.
                pass
def run():
    """Catenate a folder of single-time-point TIFFs into one OME-TIFF.

    Scans ``import_dir`` for '*tif' files, computes the maximal XYZCT
    envelope over all inputs, then writes each input (zero-padded and
    centered via an imglib2 translation) as one time point of a single
    output file ``result_name`` in ``result_dir``.

    NOTE(review): ``import_dir``, ``result_dir``, ``result_name``, ``status``
    and ``io`` are presumably ImageJ script parameters / services declared at
    file top — confirm against the full file.
    """
    t_start = datetime.now()
    image_paths = glob(os.path.join(str(import_dir.getPath()), '*tif'))
    print '\tread image metadata'
    reader = ImageReader()
    in_meta = MetadataTools.createOMEXMLMetadata()
    reader.setMetadataStore(in_meta)
    # per-file dimension lists, filled in the scan below
    x_dims = []
    y_dims = []
    z_dims = []
    c_dims = []
    t_dims = []
    eff = []   # effective size C (channel groups per plane set)
    spp = []   # samples per pixel
    for image_path in image_paths:
        print '\t parse %s' % (image_path)
        reader.setId(image_path)
        x_dims.append(reader.getSizeX())
        y_dims.append(reader.getSizeY())
        z_dims.append(reader.getSizeZ())
        c_dims.append(reader.getSizeC())
        t_dims.append(reader.getSizeT())
        eff.append(reader.imageCount / z_dims[-1] / t_dims[-1])
        spp.append(reader.getSizeC() / eff[-1])
    # pixel type / series / byte order taken from the last parsed file;
    # all inputs are assumed homogeneous in these — TODO confirm
    format = FormatTools.getPixelTypeString(reader.getPixelType())
    series = reader.getSeries()
    big_endian = Boolean.FALSE
    order = reader.getDimensionOrder()
    reader.close()

    # Compute the dimensions of the output file
    x_dim = max(x_dims)
    y_dim = max(y_dims)
    z_dim = max(z_dims)
    c_dim = max(c_dims)
    t_dim = max(t_dims)
    print '\t series: %i' % series
    print '\t format: %s' % format
    print '\t dimension order: %s' % order
    print '\t x: %s -> %i' % (x_dims, x_dim)
    print '\t y: %s -> %i' % (y_dims, y_dim)
    print '\t z: %s -> %i' % (z_dims, z_dim)
    print '\t c: %s -> %i' % (c_dims, c_dim)
    print '\t t: %s -> %i' % (t_dims, t_dim)
    print '\t effective size c: %s' % eff
    print '\t samples per pixel: %s' % spp

    # Get the time dimension from the number of input files
    t_dim = len(image_paths)

    # TODO: Tried to work out the order with Axes class, got something weird though.
    dimensions = [Short(x_dim), Short(y_dim), Short(c_dim), Short(z_dim)]
    pixels_per_plane = x_dim * y_dim

    # Assemble the metadata for the output file
    out_meta = MetadataTools.createOMEXMLMetadata()
    out_meta.setImageID(MetadataTools.createLSID('Image', series), series)
    out_meta.setPixelsID(MetadataTools.createLSID('Pixels', series), series)
    out_meta.setPixelsBinDataBigEndian(Boolean.TRUE, 0, 0)
    out_meta.setPixelsDimensionOrder(DimensionOrder.fromString(order), series)
    out_meta.setPixelsType(PixelType.fromString(format), series)
    out_meta.setPixelsSizeX(PositiveInteger(x_dim), series)
    out_meta.setPixelsSizeY(PositiveInteger(y_dim), series)
    out_meta.setPixelsSizeZ(PositiveInteger(z_dim), series)
    out_meta.setPixelsSizeC(PositiveInteger(c_dim), series)
    out_meta.setPixelsSizeT(PositiveInteger(t_dim), series)
    for c in range(c_dim):
        out_meta.setChannelID(MetadataTools.createLSID('Channel', series, c), series, c)
        out_meta.setChannelSamplesPerPixel(PositiveInteger(1), series, c)

    # Initialize the BF writer
    result_path = os.path.join(result_dir.getPath(), result_name)
    writer = ImageWriter()
    writer.setMetadataRetrieve(out_meta)
    writer.setId(result_path)
    print '\tcreated to %s' % (result_path)

    # Write the stacks into the output file
    N = len(image_paths)
    for i, image_path in enumerate(image_paths):
        status.showStatus(i, N, "catenating %i of %i time-points" % (i, N))
        print '\t processing %s' % (image_path)
        ds = io.open(image_path)
        xi = ds.dimensionIndex(Axes.X)
        xv = ds.dimension(xi)
        yi = ds.dimensionIndex(Axes.Y)
        yv = ds.dimension(yi)
        zi = ds.dimensionIndex(Axes.Z)
        zv = ds.dimension(zi)
        ti = ds.dimensionIndex(Axes.TIME)
        tv = ds.dimension(ti)
        ci = ds.dimensionIndex(Axes.CHANNEL)
        cv = ds.dimension(ci)
        # center the (possibly smaller) input inside the output envelope
        dx = float(x_dim - xv) / 2.0
        dy = float(y_dim - yv) / 2.0
        dz = float(z_dim - zv) / 2.0
        print '\t translation vector (dx, dy, dz) = (%f, %f, %f)' % (
            dx, dy, dz)
        if (dx != 0) or (dy != 0) or (dz != 0):
            # shift, then pad the out-of-bounds region with zeros
            stk = Views.translate(ds, long(dx), long(dy), long(0), long(dz))
            stk = Views.extendZero(stk)
        else:
            stk = Views.extendZero(ds.getImgPlus().getImg())
        print '\t writing planes ',
        n = 0
        plane = 1
        byte_array = []
        interval_view = Views.interval(stk, \
            [Long(0), Long(0), Long(0), Long(0)], \
            [Long(x_dim - 1), Long(y_dim - 1), Long(c_dim - 1), Long(z_dim - 1)])
        cursor = interval_view.cursor()
        # stream pixel values out plane by plane; each accumulated plane is
        # flushed to the writer as raw bytes
        while cursor.hasNext():
            n += 1
            cursor.fwd()
            value = cursor.get().getInteger()
            # NOTE(review): shortToBytes assumes a 16-bit pixel type — confirm
            # this matches the detected 'format'
            bytes = DataTools.shortToBytes(value, big_endian)
            byte_array.extend(bytes)
            if n == pixels_per_plane:
                writer.saveBytes(plane - 1, byte_array)
                print '.',
                if ((plane) % 10) == 0:
                    print '\n\t ',
                byte_array = []
                plane += 1
                n = 0
        print ' '
    writer.close()
    t = datetime.now() - t_start
    print '\twrote %i planes to %s in %i sec.' % (plane - 1, result_path,
                                                  t.total_seconds())
    print '... done.'
from ij.plugin import Concatenator from net.imagej.axis import Axes from jarray import array from net.imglib2.type.numeric.integer import UnsignedByteType import net.imglib2.type.logic.BitType import net.imglib2.algorithm.neighborhood.HyperSphereShape from net.imglib2.type.numeric.real import FloatType,DoubleType from ij.measure import ResultsTable from net.imagej.ops import Ops from loci.plugins.in import ImporterOptions options = ImporterOptions() options.setId(Input_File.getAbsolutePath()) from loci.formats import ImageReader from loci.formats import MetadataTools #get import ready and import reader = ImageReader() omeMeta = MetadataTools.createOMEXMLMetadata() reader.setMetadataStore(omeMeta) reader.setId(Input_File.getAbsolutePath()) seriesCount = reader.getSeriesCount() reader.close() #open image imp, = BF.openImagePlus(options) #get output path variable outdir=Output_File.getAbsolutePath() #get input path variable inpu=Input_File.getAbsolutePath() #convert to RGB IC(imp).convertToRGB() #show image imp.show()
def get_ome_metadata(source, imagenames):
    """Get the stage coordinates and calibration from the ome-xml for a given
    list of images

    Arguments:
        source {string} -- Path to the images
        imagenames {list} -- list of images filenames

    Returns:
        a tuple that contains:
        dimensions {int} -- number of dimensions (2D or 3D)
        stage_coordinates_x {list} -- the abosolute stage x-coordinates from ome-xml metadata
        stage_coordinates_y {list} -- the abosolute stage y-coordinates from ome-xml metadata
        stage_coordinates_z {list} -- the abosolute stage z-coordinates from ome-xml metadata
        relative_coordinates_x_px {list} -- the relative stage x-coordinates in px
        relative_coordinates_y_px {list} -- the relative stage y-coordinates in px
        relative_coordinates_z_px {list} -- the relative stage z-coordinates in px
        image_calibration {list} -- x,y,z image calibration in unit/px
        calibration_unit {string} -- image calibration unit
        image_dimensions_czt {list} -- number of images in dimensions c,z,t
    """

    # open an array to store the abosolute stage coordinates from metadata
    stage_coordinates_x = []
    stage_coordinates_y = []
    stage_coordinates_z = []

    for counter, image in enumerate(imagenames):

        # parse metadata
        reader = ImageReader()
        omeMeta = MetadataTools.createOMEXMLMetadata()
        reader.setMetadataStore(omeMeta)
        reader.setId(source + str(image))

        # get hyperstack dimensions from the first image only; all later
        # images are assumed to share the same dimensions and calibration
        if counter == 0:
            frame_size_x = reader.getSizeX()
            frame_size_y = reader.getSizeY()
            frame_size_z = reader.getSizeZ()
            frame_size_c = reader.getSizeC()
            frame_size_t = reader.getSizeT()

            # note the dimensions
            if frame_size_z == 1:
                dimensions = 2
            if frame_size_z > 1:
                dimensions = 3

            # get the physical calibration for the first image series
            physSizeX = omeMeta.getPixelsPhysicalSizeX(0)
            physSizeY = omeMeta.getPixelsPhysicalSizeY(0)
            physSizeZ = omeMeta.getPixelsPhysicalSizeZ(0)

            # workaround to get the z-interval if physSizeZ.value() returns None.
            z_interval = 1
            if physSizeZ is not None:
                z_interval = physSizeZ.value()

            if frame_size_z > 1 and physSizeZ is None:
                # estimate the z-interval from the distance between the first
                # two z-planes instead
                print "no z calibration found, trying to recover"
                first_plane = omeMeta.getPlanePositionZ(0, 0)
                # NOTE(review): plane index of the next z-plane is computed as
                # c + t - 1; depends on the file's plane ordering — confirm
                next_plane_imagenumber = frame_size_c + frame_size_t - 1
                second_plane = omeMeta.getPlanePositionZ(
                    0, next_plane_imagenumber)
                z_interval = abs(
                    abs(first_plane.value()) - abs(second_plane.value()))
                print "z-interval seems to be: ", z_interval

            # create an image calibration
            image_calibration = [
                physSizeX.value(), physSizeY.value(), z_interval
            ]
            calibration_unit = physSizeX.unit().getSymbol()
            image_dimensions_czt = [frame_size_c, frame_size_z, frame_size_t]

        reader.close()

        # get the plane position in calibrated units
        current_position_x = omeMeta.getPlanePositionX(0, 0)
        current_position_y = omeMeta.getPlanePositionY(0, 0)
        current_position_z = omeMeta.getPlanePositionZ(0, 0)

        # get the absolute stage positions and store them
        pos_x = current_position_x.value()
        pos_y = current_position_y.value()

        if current_position_z is None:
            # fall back to a constant z when the metadata has no z-position
            print "the z-position is missing in the ome-xml metadata."
            pos_z = 1.0
        else:
            pos_z = current_position_z.value()

        stage_coordinates_x.append(pos_x)
        stage_coordinates_y.append(pos_y)
        stage_coordinates_z.append(pos_z)

    # calculate the store the relative stage movements in px (for the grid/collection stitcher)
    relative_coordinates_x_px = []
    relative_coordinates_y_px = []
    relative_coordinates_z_px = []

    for i in range(len(stage_coordinates_x)):
        rel_pos_x = (stage_coordinates_x[i] -
                     stage_coordinates_x[0]) / physSizeX.value()
        rel_pos_y = (stage_coordinates_y[i] -
                     stage_coordinates_y[0]) / physSizeY.value()
        rel_pos_z = (stage_coordinates_z[i] -
                     stage_coordinates_z[0]) / z_interval

        relative_coordinates_x_px.append(rel_pos_x)
        relative_coordinates_y_px.append(rel_pos_y)
        relative_coordinates_z_px.append(rel_pos_z)

    return (dimensions, stage_coordinates_x, stage_coordinates_y,
            stage_coordinates_z, relative_coordinates_x_px,
            relative_coordinates_y_px, relative_coordinates_z_px,
            image_calibration, calibration_unit, image_dimensions_czt)
def readZeissHeader(self, infoStr):
    """Extract acquisition date/time and voxel sizes from a Zeiss LSM/CZI header.

    Populates self.dateStr, self.timeStr, self.voxelx/y/z, self.numChannels
    and self.zoom from either the OME metadata (LSM) or the ImageJ info
    string / calibration (CZI). Clears self.infoStr for CZI files.

    Arguments:
        infoStr {string} -- the ImageJ image-info string (key=value lines)
    """
    # This is incredibly difficult to get working as (date, time, voxels) are in different obscure places in lsm and czi
    # Furthermore, just trying to read the raw ome xls is futile
    #
    # parsing ome xml as a string and searching it with regular expression(re) does not work
    # it is beyond the scope of my work to figure this out
    # the fact that it does not work and there is little documentaiton is a pretty big waste of time
    #
    # get and parse xml to find date/time
    #fi = self.imp.getOriginalFileInfo(); # returns a FileInfo object
    #omexml = fi.description #omexml is a string
    #omexml = omexml.encode('utf-8')
    #omexml = omexml.replaceAll("[^\\x20-\\x7e]", "") # see: https://stackoverflow.com/questions/2599919/java-parsing-xml-document-gives-content-not-allowed-in-prolog-error

    # (1) try and search the ome xml like a string, this gives errors
    #docsPattern = '<AcquisitionDate>.*</AcquisitionDate>'
    #searchresult = re.search(docsPattern, omexml)
    #print 'searchresult:', searchresult.group(0)

    # 2) treat the ome xml like any other xml (because it's xml, right?)
    # well this raises errors too
    #omexml has <AcquisitionDate>2016-08-17T15:21:50</AcquisitionDate>
    #import xml.etree.ElementTree
    #e = xml.etree.ElementTree.fromstring(omexml).getroot()
    #print omexml
    #for atype in e.findall('AcquisitionDate'):
    #    print 'AcquisitionDate:', atype #.get('foobar')
    #
    #
    if self.islsm:
        # lsm have date hidden in omeMeta.getImageAcquisitionDate(0)
        # this is copied from code at: https://gist.github.com/ctrueden/6282856
        reader = ImageReader()
        omeMeta = MetadataTools.createOMEXMLMetadata()
        #omeMeta.getImageAcquisitionDate(0)
        reader.setMetadataStore(omeMeta)
        reader.setId(self.filepath)
        #seriesCount = reader.getSeriesCount()
        dateTimeStr = omeMeta.getImageAcquisitionDate(0) #2016-08-17T16:36:26
        reader.close()
        if dateTimeStr:
            # ISO timestamp: split 'YYYY-MM-DDTHH:MM:SS' on the 'T'
            self.dateStr, self.timeStr = dateTimeStr.toString().split('T')
            self.dateStr = bFixDate(self.dateStr)
            self.timeStr = bFixTime(self.timeStr)
            #bPrintLog('LSM date/time is: ' + self.dateStr + ' ' + self.timeStr, 3)
        else:
            bPrintLog('WARNING: did not get Zeiss date/time string')

        # lsm have voxels in infoStr
        for line in infoStr.split('\n'):
            #print line
            if line.find('VoxelSizeX') != -1:
                self.voxelx = float(line.split('=')[1])
            if line.find('VoxelSizeY') != -1:
                self.voxely = float(line.split('=')[1])
            if line.find('VoxelSizeZ') != -1:
                self.voxelz = float(line.split('=')[1])
            if line.find('SizeC') != -1:
                self.numChannels = int(line.split('=')[1])
            #if line.find('BitsPerPixel') and not line.startswith('Experiment') != -1: # 20170811, startswith is for czi
            #    self.bitsPerPixel = int(line.split('=')[1])
            if line.find('RecordingZoomX#1') != -1:
                self.zoom = int(line.split('=')[1])

    if self.isczi:
        # czi has date/time in infoStr (lsm does not)
        for line in infoStr.split('\n'):
            if line.find('CreationDate #1') != -1: # w.t.f. is #1 referring to?
                lhs, rhs = line.split('=')
                # NOTE(review): this replace is a no-op as written (space ->
                # space); possibly meant to collapse double spaces — confirm
                rhs = rhs.replace(' ', ' ')
                if rhs.startswith(' '):
                    rhs = rhs[1:-1]
                #print "lhs: '" + lhs + "'" + "rhs: '" + rhs + "'"
                if rhs.find('T') != -1:
                    self.dateStr, self.timeStr = rhs.split('T')
                else:
                    self.dateStr, self.timeStr = rhs.split(' ')
                self.dateStr = bFixDate(self.dateStr)
                self.timeStr = bFixTime(self.timeStr)
                #bPrintLog('CZI date/time is: ' + self.dateStr + ' ' + self.timeStr, 3)

        # .czi
        # <Pixels BigEndian="false" DimensionOrder="XYCZT" ID="Pixels:0" Interleaved="false" PhysicalSizeX="0.20756645602494875" PhysicalSizeXUnit="µm" PhysicalSizeY="0.20756645602494875" PhysicalSizeYUnit="µm" PhysicalSizeZ="0.75" PhysicalSizeZUnit="µm" SignificantBits="8" SizeC="1" SizeT="1" SizeX="1024" SizeY="1024" SizeZ="50" Type="uint8">

        # czi have voxel in calibration
        self.voxelx = self.imp.getCalibration().pixelWidth;
        self.voxely = self.imp.getCalibration().pixelHeight;
        self.voxelz = self.imp.getCalibration().pixelDepth;
        #bPrintLog('readZeissHeader() read czi scale as: ' + str(self.voxelx) + ' ' + str(self.voxely) + ' ' + str(self.voxelz), 3)

        # CLEARING self.infoStr for CZI ... it was WAY to big to parse in Map Manager
        self.infoStr = ''
def get_reader(file, complete_meta): reader = ImageReader() reader.setMetadataStore(complete_meta) reader.setId(file) return reader