def DirList(baseDir): r = ImageReader() imgStats = {} for root, dirs, files in os.walk(str(baseDir)): for f1 in files: if f1.endswith(".jpg") or f1.endswith(".jpe") or f1.endswith( ".jpeg"): id = root + "/" + f1 r.setId(id) if r is None: print "Couldn\'t open image from file:", id continue w = r.getSizeX() h = r.getSizeY() imgStats[str(w) + "_" + str(h)] = imgStats.get(str(w) + "_" + str(h), 0) + 1 IJ.log("Found image: " + str(id)) #counter += 1 r.close() #print summary summary = '' for k, v in imgStats.iteritems(): dim = k.split("_") ratio = float(dim[0]) / float(dim[1]) IJ.log("Found " + str(v) + " images of dimension " + str(dim[0]) + "x" + str(dim[1]) + " apect ratio " + str(round(ratio, 2))) summary = summary + "\nFound " + str( v) + " images of dimension " + str(dim[0]) + "x" + str( dim[1]) + " apect ratio " + str(round(ratio, 2)) return summary
def get_calibration_from_metadata(path_to_image):
    """get the pixel calibration from a given image using Bio-Formats

    Parameters
    ----------
    path_to_image : str
        full path to the input image

    Returns
    -------
    array
        the physical px size as float for x,y,z; an axis without
        calibration in the metadata is returned as None
    """
    reader = ImageReader()
    omeMeta = MetadataTools.createOMEXMLMetadata()
    reader.setMetadataStore(omeMeta)
    reader.setId(str(path_to_image))
    physSizeX = omeMeta.getPixelsPhysicalSizeX(0)
    physSizeY = omeMeta.getPixelsPhysicalSizeY(0)
    physSizeZ = omeMeta.getPixelsPhysicalSizeZ(0)
    reader.close()
    # uncalibrated images report None for the physical size; guard the
    # .value() calls so such files no longer raise AttributeError
    image_calibration = [s.value() if s is not None else None
                         for s in (physSizeX, physSizeY, physSizeZ)]
    return image_calibration
def DirList(baseDir): r = ImageReader() imgStats = {} for root, dirs, files in os.walk(str(baseDir)): for f1 in files: if f1.endswith(".jpg") or f1.endswith(".jpe") or f1.endswith(".jpeg"): id = root + "/" + f1 r.setId(id) if r is None: print "Couldn\'t open image from file:", id continue w = r.getSizeX() h = r.getSizeY() imgStats[str(w) + "_" + str(h)] = imgStats.get(str(w) + "_" + str(h), 0)+1 IJ.log("Found image: " + str(id)) #counter += 1 r.close() #print summary summary = '' for k, v in imgStats.iteritems(): dim = k.split("_") ratio = float(dim[0])/float(dim[1]) IJ.log("Found " + str(v) + " images of dimension " + str(dim[0]) + "x" + str(dim[1]) + " apect ratio " + str(round(ratio, 2))) summary = summary + "\nFound " + str(v) + " images of dimension " + str(dim[0]) + "x" + str(dim[1]) + " apect ratio " + str(round(ratio, 2)) return summary
def Z1_metadata(sourcefile):
    """Return the number of image series (views) in a Z1 lightsheet file
    by reading only its header metadata."""
    # Attach an OME metadata store before setId so Bio-Formats parses
    # the header without loading pixel data.
    series_reader = ImageReader()
    series_reader.setMetadataStore(MetadataTools.createOMEXMLMetadata())
    series_reader.setId(sourcefile)
    n_views = series_reader.getSeriesCount()
    series_reader.close()
    return n_views
def get_metadata(imagefile, imageID=0):
    """Collect size and scaling metadata for an image via Bio-Formats.

    Parameters
    ----------
    imagefile : str
        full path to the image file
    imageID : int
        OME image (series) index whose dimensions are read

    Returns
    -------
    OrderedDict
        metadata entries sorted alphabetically by key
    """
    metainfo = {}
    # initialize the reader and get the OME metadata
    reader = ImageReader()
    omeMeta = MetadataTools.createOMEXMLMetadata()
    reader.setMetadataStore(omeMeta)
    reader.setId(imagefile)
    # bugfix: query the image count only AFTER setId -- on the still-empty
    # metadata store getImageCount() always reported 0
    metainfo['ImageCount_OME'] = omeMeta.getImageCount()
    metainfo['SeriesCount_BF'] = reader.getSeriesCount()
    reader.close()
    # read dimensions TZCXY from OME metadata
    metainfo['SizeT'] = omeMeta.getPixelsSizeT(imageID).getValue()
    metainfo['SizeZ'] = omeMeta.getPixelsSizeZ(imageID).getValue()
    metainfo['SizeC'] = omeMeta.getPixelsSizeC(imageID).getValue()
    metainfo['SizeX'] = omeMeta.getPixelsSizeX(imageID).getValue()
    metainfo['SizeY'] = omeMeta.getPixelsSizeY(imageID).getValue()
    # store info about stack: more than one z-plane means a 3D stack
    metainfo['is3d'] = metainfo['SizeZ'] > 1
    # get the scaling for XYZ; None marks an uncalibrated axis
    physSizeX = omeMeta.getPixelsPhysicalSizeX(0)
    physSizeY = omeMeta.getPixelsPhysicalSizeY(0)
    physSizeZ = omeMeta.getPixelsPhysicalSizeZ(0)
    metainfo['ScaleX'] = round(physSizeX.value(), 3) if physSizeX is not None else None
    # guard Y independently of X: a file can be calibrated in X only
    metainfo['ScaleY'] = round(physSizeY.value(), 3) if physSizeY is not None else None
    metainfo['ScaleZ'] = round(physSizeZ.value(), 3) if physSizeZ is not None else None
    # sort the dictionary
    metainfo = OrderedDict(sorted(metainfo.items()))
    return metainfo
def get_metadata(params):
    """get image metadata, either from the image file or from acquisition-time metadata"""
    # Preferred path: read frame interval and pixel size directly from the
    # image's OME metadata via Bio-Formats.
    if params.metadata_source == "Image metadata":
        try:
            reader = ImageReader()
            ome_meta = MetadataTools.createOMEXMLMetadata()
            reader.setMetadataStore(ome_meta)
            reader.setId(params.input_image_path)
            reader.close()
            params.setFrameInterval(
                ome_meta.getPixelsTimeIncrement(0).value())
            params.setIntervalUnit(
                ome_meta.getPixelsTimeIncrement(0).unit().getSymbol())
            params.setPixelPhysicalSize(
                ome_meta.getPixelsPhysicalSizeX(0).value())
            params.setPixelSizeUnit(
                ome_meta.getPixelsPhysicalSizeX(0).unit().getSymbol())
            params.setMetadataSourceFile(None)
        except Exception as e:
            # Missing OME fields surface here (e.g. calling .value() on None);
            # warn the user and fall through to the acquisition-metadata path
            # by switching the source setting.
            print(e.message)
            mbui.warning_dialog([
                "There was a problem getting metadata from the image: ",
                e.message,
                "Please consider using acquisition metadata instead (click OK). ",
                "Or, quit the analysis run and investigate image metadata by hand. "
            ])
            params.setMetadataSource("Acquisition metadata")
    # Fallback/alternate path: parse a text metadata file exported at
    # acquisition time; the user picks the file in a dialog.
    if params.metadata_source == "Acquisition metadata":
        od = OpenDialog('Choose acquisition metadata file...',
                        os.path.dirname(params.input_image_path), '*.txt')
        file_path = od.getPath()
        if file_path is None:
            raise IOError('no metadata file chosen')
        acq_metadata_dict = import_iq3_metadata(file_path)
        # Missing timing info defaults to 1.0 "frames" per interval.
        try:
            params.setFrameInterval(acq_metadata_dict['frame_interval'])
        except KeyError:
            params.setFrameInterval(1.0)
        try:
            params.setIntervalUnit(acq_metadata_dict['time_unit'])
        except KeyError:
            params.setIntervalUnit('frames')
        params.setPixelPhysicalSize(acq_metadata_dict['x_physical_size'])
        params.setPixelSizeUnit(acq_metadata_dict['x_unit'])
        params.setMetadataSourceFile(file_path)
    return params
def load_ome_img(file_name):
    """Open an image with Bio-Formats and return it with its OME metadata.

    :param file_name: path to the image file
    :return: tuple (first ImagePlus in the file, OME-XML metadata store)
    """
    opened = BF.openImagePlus(file_name)
    image_plus = opened[0]
    # parse metadata in a second, metadata-only reader pass
    meta_reader = ImageReader()
    ome_meta = MetadataTools.createOMEXMLMetadata()
    meta_reader.setMetadataStore(ome_meta)
    meta_reader.setId(file_name)
    print(ome_meta)
    meta_reader.close()
    return (image_plus, ome_meta)
def time_parser():
    """Iterate through a timelapse and log every timepoint (in minutes,
    relative to the first acquisition) with its series name.
    - S. Grødem 2017
    """
    # Read the OME metadata of the experiment file.
    reader = ImageReader()
    omeMeta = MetadataTools.createOMEXMLMetadata()
    reader.setMetadataStore(omeMeta)
    reader.setId(str(Experiment))
    # Extracts number of image series, channel number
    seriesCount = reader.getSeriesCount()
    reader.close()
    # Collect acquisition dates (as strings) and series names.
    timelist = []
    namelist = []
    for series_idx in range(seriesCount):
        stamp = omeMeta.getImageAcquisitionDate(series_idx)
        timelist.append(stamp.toString())
        namelist.append(omeMeta.getImageName(series_idx))
    # Convert ISO date strings to epoch seconds, then to minutes relative
    # to the first series.
    timelist = [time.mktime(time.strptime(t, u'%Y-%m-%dT%H:%M:%S'))
                for t in timelist]
    timelist_unsorted = [(t - timelist[0]) / 60 for t in timelist]
    # Sort timepoints (and names) chronologically.
    timelist, namelist = zip(*sorted(zip(timelist_unsorted, namelist)))
    timelist = [round(float(minutes), 3) for minutes in timelist]
    # Output to IJ log
    images = zip(timelist, namelist)
    IJ.log("Series number: " + str(seriesCount))
    IJ.log("*" * 15)
    for entry in images:
        IJ.log("Name: " + str(entry[1]))
        IJ.log("Time: " + str(entry[0]))
        IJ.log("-" * 15)
def choose_series(filepath, params): """if input file contains more than one image series (xy position), prompt user to choose which one to use""" # todo: if necessary (e.g. if lots of series), can improve thumbnail visuals based loosely on https://github.com/ome/bio-formats-imagej/blob/master/src/main/java/loci/plugins/in/SeriesDialog.java import_opts = ImporterOptions(); import_opts.setId(filepath); reader = ImageReader(); ome_meta = MetadataTools.createOMEXMLMetadata(); reader.setMetadataStore(ome_meta); reader.setId(filepath); no_series = reader.getSeriesCount(); if no_series == 1: return import_opts, params; else: series_names = [ome_meta.getImageName(idx) for idx in range(no_series)]; dialog = GenericDialog("Select series to load..."); dialog.addMessage("There are multiple series in this file! \n" + "This is probably because there are multiple XY stage positions. \n " + "Please choose which series to load: "); thumbreader = BufferedImageReader(reader); cbg = CheckboxGroup(); for idx in range(no_series): p = Panel(); p.add(Box.createRigidArea(Dimension(thumbreader.getThumbSizeX(), thumbreader.getThumbSizeY()))); ThumbLoader.loadThumb(thumbreader, idx, p, True); dialog.addPanel(p); cb = Checkbox(series_names[idx], cbg, idx==0); p.add(cb); dialog.showDialog(); if dialog.wasCanceled(): raise KeyboardInterrupt("Run canceled"); if dialog.wasOKed(): selected_item = cbg.getSelectedCheckbox().getLabel(); selected_index = series_names.index(selected_item); params.setSelectedSeriesIndex(selected_index); for idx in range(0, no_series): import_opts.setSeriesOn(idx, True) if (idx==selected_index) else import_opts.setSeriesOn(idx, False); reader.close(); return import_opts, params
def processMovie(root, files, outfile):
    """Concatenate images and write ome.tiff file. If image contains already
    multiple time points just copy the image"""
    files.sort()
    # Open the first file virtually only to inspect its frame count.
    options = ImporterOptions()
    options.setId(files[0])
    options.setVirtual(1)
    image = BF.openImagePlus(options)
    image = image[0]
    if image.getNFrames() > 1:
        msg = ("%s Contains multiple time points. Can only concatenate"
               " single time points!" %files[0])
        raise RuntimeError(msg)
    image.close()
    # Build the output OME metadata, seeded from the first file.
    reader = ImageReader()
    reader.setMetadataStore(MetadataTools.createOMEXMLMetadata())
    reader.setId(files[0])
    timeInfo = []
    omeOut = reader.getMetadataStore()
    omeOut = setUpXml(omeOut, image, files)
    reader.close()
    image.close()
    itime = 0
    # Copy per-plane metadata (delta-T, stage position, CZT indices) from
    # each input file into the corresponding plane slot of the output.
    for fileName in files:
        omeMeta = MetadataTools.createOMEXMLMetadata()
        reader.setMetadataStore(omeMeta)
        reader.setId(fileName)
        timeInfo.append(getTimePoint(reader, omeMeta))
        nrImages = reader.getImageCount()
        for i in range(0, reader.getImageCount()):
            # Delta-T relative to the first timepoint; the except branch
            # handles timepoints that subtract to a timedelta-like object
            # rather than a plain number.
            try:
                dT = round(timeInfo[files.index(fileName)]-timeInfo[0],2)
            except:
                dT = (timeInfo[files.index(fileName)]-timeInfo[0]).seconds
            omeOut.setPlaneDeltaT(dT, 0, i + itime*nrImages)
            omeOut.setPlanePositionX(omeOut.getPlanePositionX(0,i), 0, i + itime*nrImages)
            omeOut.setPlanePositionY(omeOut.getPlanePositionY(0,i), 0, i + itime*nrImages)
            omeOut.setPlanePositionZ(omeOut.getPlanePositionZ(0,i), 0, i + itime*nrImages)
            omeOut.setPlaneTheC(omeOut.getPlaneTheC(0,i), 0, i + itime*nrImages)
            omeOut.setPlaneTheT(omeOut.getPlaneTheT(0,i), 0, i + itime*nrImages)
            omeOut.setPlaneTheZ(omeOut.getPlaneTheZ(0,i), 0, i + itime*nrImages)
        itime = itime + 1
        reader.close()
        IJ.showProgress(files.index(fileName), len(files))
    # Average time increment over the whole series; 0 when it cannot be
    # computed (e.g. division error on a single file).
    try:
        omeOut.setPixelsTimeIncrement(float(dT/(len(files)-1)), 0)
    except:
        omeOut.setPixelsTimeIncrement(0, 0)
    if len(files) <= 1:
        raise RuntimeError('Found only one file. Nothing to concatenate')
    # Concatenate pixel data, then overwrite the TIFF comment in place
    # with the assembled OME-XML.
    outfile = concatenateImagePlus(files, outfile)
    filein = RandomAccessInputStream(outfile)
    fileout = RandomAccessOutputStream(outfile)
    saver = TiffSaver(fileout, outfile)
    saver.overwriteComment(filein, omeOut.dumpXML())
    fileout.close()
    filein.close()
# NOTE(review): this chunk starts mid-script -- the condition guarding this
# increment (presumably a partial-tile check) is above the visible region.
nYTiles = nYTiles + 1
for y in range(nYTiles):
    for x in range(nXTiles):
        # Calculate the correct size and offset for each tile
        tileX = x * tileSizeX
        tileY = y * tileSizeY
        # Clamp the effective tile size at the right/bottom image edges.
        effTileSizeX = tileSizeX * currentScale
        if ((tileX * currentScale) + effTileSizeX) >= width:
            effTileSizeX = width - (tileX * currentScale)
        effTileSizeY = tileSizeY * currentScale
        if ((tileY * currentScale) + effTileSizeY) >= height:
            effTileSizeY = height - (tileY * currentScale)
        # Read the tile, create the downsampled version and then write to output
        tile = reader.openBytes(image, tileX * currentScale, tileY * currentScale, effTileSizeX, effTileSizeY)
        downsample = scaler.downsample(tile, effTileSizeX, effTileSizeY,
                                       currentScale,
                                       FormatTools.getBytesPerPixel(type),
                                       reader.isLittleEndian(),
                                       FormatTools.isFloatingPoint(type),
                                       reader.getRGBChannelCount(),
                                       reader.isInterleaved())
        writer.saveBytes(image, downsample, tileX, tileY, effTileSizeX / currentScale, effTileSizeY / currentScale)
writer.close();
reader.close();
IJ.log("Done")
# Re-open the generated file and display series 2 in composite color mode.
options = ImporterOptions()
options.setColorMode(ImporterOptions.COLOR_MODE_COMPOSITE)
options.setId(outFile)
options.setSeriesOn(2, True);
imps = BF.openImagePlus(options)
for imp in imps:
    imp.show()
def readczi(imagefile,
            stitchtiles=True,
            setflatres=False,
            readpylevel=0,
            setconcat=True,
            openallseries=True,
            showomexml=False,
            attach=False,
            autoscale=True):
    """Open a CZI file, collect its metadata and return the requested
    pyramid level as an ImagePlus.

    Parameters
    ----------
    imagefile : str
        full path to the CZI file
    stitchtiles : bool
        let the CZIReader auto-stitch tiles (False returns raw tiles as series)
    setflatres : bool
        flatten pyramid resolutions into separate series
    readpylevel : int
        pyramid level to return (0 = full resolution)
    setconcat : bool
        concatenate all series into one stack on import
    openallseries : bool
        open every series contained in the file
    showomexml : bool
        display the OME-XML during import
    attach : bool
        also read attachment images
    autoscale : bool
        autoscale the display range on import

    Returns
    -------
    tuple
        (ImagePlus, dict with size/scale metadata)
    """
    log.info('Filename : ' + imagefile)
    metainfo = {}
    # checking for the file extension
    metainfo['Extension'] = MiscTools.getextension(MiscTools.splitext_recurse(imagefile))
    log.info('Detected File Extension : ' + metainfo['Extension'])
    # initialize the reader and get the OME metadata
    reader = ImageReader()
    omeMeta = MetadataTools.createOMEXMLMetadata()
    reader.setMetadataStore(omeMeta)
    reader.setId(imagefile)
    metainfo['SeriesCount_BF'] = reader.getSeriesCount()
    reader.close()
    # get the scaling for XYZ; None marks an uncalibrated axis
    physSizeX = omeMeta.getPixelsPhysicalSizeX(0)
    physSizeY = omeMeta.getPixelsPhysicalSizeY(0)
    physSizeZ = omeMeta.getPixelsPhysicalSizeZ(0)
    metainfo['ScaleX'] = round(physSizeX.value(), 3) if physSizeX is not None else None
    # bugfix: ScaleY was previously computed from physSizeX; use the Y size
    metainfo['ScaleY'] = round(physSizeY.value(), 3) if physSizeY is not None else None
    metainfo['ScaleZ'] = round(physSizeZ.value(), 3) if physSizeZ is not None else None
    # configure the CZI-specific reader options
    options = DynamicMetadataOptions()
    options.setBoolean("zeissczi.autostitch", stitchtiles)
    options.setBoolean("zeissczi.attachments", attach)
    czireader = ZeissCZIReader()
    czireader.setFlattenedResolutions(setflatres)
    czireader.setMetadataOptions(options)
    czireader.setId(imagefile)
    # Set the preferences in the ImageJ plugin
    # Note although these preferences are applied, they are not refreshed in the UI
    Prefs.set("bioformats.zeissczi.allow.autostitch", str(stitchtiles).lower())
    Prefs.set("bioformats.zeissczi.include.attachments", str(attach).lower())
    metainfo['rescount'] = czireader.getResolutionCount()
    metainfo['SeriesCount_CZI'] = czireader.getSeriesCount()
    # Dimensions
    metainfo['SizeT'] = czireader.getSizeT()
    metainfo['SizeZ'] = czireader.getSizeZ()
    metainfo['SizeC'] = czireader.getSizeC()
    metainfo['SizeX'] = czireader.getSizeX()
    metainfo['SizeY'] = czireader.getSizeY()
    # check for autostitching and possibility to read attachment
    metainfo['AllowAutoStitching'] = czireader.allowAutostitching()
    metainfo['CanReadAttachments'] = czireader.canReadAttachments()
    # read in and display ImagePlus(es) with arguments
    options = ImporterOptions()
    options.setOpenAllSeries(openallseries)
    options.setShowOMEXML(showomexml)
    options.setConcatenate(setconcat)
    options.setAutoscale(autoscale)
    options.setId(imagefile)
    # open the ImgPlus
    imps = BF.openImagePlus(options)
    metainfo['Pyramid Level Output'] = readpylevel + 1
    # narrowed from bare "except": only a missing index should trigger the
    # fallback to the full-resolution level
    try:
        imp = imps[readpylevel]
        pylevelout = metainfo['SeriesCount_CZI']
    except IndexError:
        # fallback option
        log.info('PyLevel=' + str(readpylevel) + ' does not exist.')
        log.info('Using Pyramid Level = 0 as fallback.')
        imp = imps[0]
        pylevelout = 0
        metainfo['Pyramid Level Output'] = pylevelout
    # get the stack and some info
    imgstack = imp.getImageStack()
    metainfo['Output Slices'] = imgstack.getSize()
    metainfo['Output SizeX'] = imgstack.getWidth()
    metainfo['Output SizeY'] = imgstack.getHeight()
    # calc scaling in case of pyramid
    scale = float(metainfo['SizeX']) / float(metainfo['Output SizeX'])
    metainfo['Pyramid Scale Factor'] = scale
    metainfo['ScaleX Output'] = metainfo['ScaleX'] * scale
    metainfo['ScaleY Output'] = metainfo['ScaleY'] * scale
    # set the correct scaling
    # bugfix: scaleY previously reused metainfo['ScaleX Output']
    imp = MiscTools.setscale(imp,
                             scaleX=metainfo['ScaleX Output'],
                             scaleY=metainfo['ScaleY Output'],
                             scaleZ=metainfo['ScaleZ'],
                             unit="micron")
    # close czireader
    czireader.close()
    return imp, metainfo
# Minimal script: prompt for a file and report its Bio-Formats series count.
from ij import IJ
from ij.io import OpenDialog
from loci.formats import ImageReader
from loci.formats import MetadataTools

# open file
od = OpenDialog("Choose a file");
filepath = od.getPath()
print("Image path: " + filepath);

# use bio-formats to extract information
reader = ImageReader()
omeMeta = MetadataTools.createOMEXMLMetadata()
reader.setMetadataStore(omeMeta)
reader.setId(filepath)
seriesCount = reader.getSeriesCount()
print "Series count:",seriesCount
reader.close()
def main():
    """Estimate per-channel BaSiC flat-field and dark-field shading
    profiles for a multi-series dataset and save them as TIFFs."""
    Interpreter.batchMode = True
    # lambda_flat/lambda_dark must be supplied together (manual) or both
    # left at zero (automatic estimation).
    if (lambda_flat == 0) ^ (lambda_dark == 0):
        print ("ERROR: Both of lambda_flat and lambda_dark must be zero,"
               " or both non-zero.")
        return
    lambda_estimate = "Automatic" if lambda_flat == 0 else "Manual"
    print "Loading images..."
    # Use BioFormats reader directly to determine dataset dimensions without
    # reading every single image. The series count (num_images) is the one value
    # we can't easily get any other way, but we might as well grab the others
    # while we have the reader available.
    bfreader = ImageReader()
    bfreader.id = str(filename)
    num_images = bfreader.seriesCount
    num_channels = bfreader.sizeC
    width = bfreader.sizeX
    height = bfreader.sizeY
    bfreader.close()
    # The internal initialization of the BaSiC code fails when we invoke it via
    # scripting, unless we explicitly set the private 'noOfSlices' field.
    # Since it's private, we need to use Java reflection to access it.
    Basic_noOfSlices = Basic.getDeclaredField('noOfSlices')
    Basic_noOfSlices.setAccessible(True)
    basic = Basic()
    Basic_noOfSlices.setInt(basic, num_images)
    # Pre-allocate the output profile images, since we have all the dimensions.
    ff_image = IJ.createImage("Flat-field", width, height, num_channels, 32);
    df_image = IJ.createImage("Dark-field", width, height, num_channels, 32);
    print("\n\n")
    # BaSiC works on one channel at a time, so we only read the images from one
    # channel at a time to limit memory usage.
    for channel in range(num_channels):
        print "Processing channel %d/%d..." % (channel + 1, num_channels)
        print "==========================="
        options = ImporterOptions()
        options.id = str(filename)
        options.setOpenAllSeries(True)
        # concatenate=True gives us a single stack rather than a list of
        # separate images.
        options.setConcatenate(True)
        # Limit the reader to the channel we're currently working on. This loop
        # is mainly why we need to know num_images before opening anything.
        for i in range(num_images):
            options.setCBegin(i, channel)
            options.setCEnd(i, channel)
        # openImagePlus returns a list of images, but we expect just one (a
        # stack).
        input_image = BF.openImagePlus(options)[0]
        # BaSiC seems to require the input image is actually the ImageJ
        # "current" image, otherwise it prints an error and aborts.
        WindowManager.setTempCurrentImage(input_image)
        basic.exec(
            input_image, None, None,
            "Estimate shading profiles",
            "Estimate both flat-field and dark-field",
            lambda_estimate, lambda_flat, lambda_dark,
            "Ignore", "Compute shading only"
        )
        input_image.close()
        # Copy the pixels from the BaSiC-generated profile images to the
        # corresponding channel of our output images.
        ff_channel = WindowManager.getImage("Flat-field:%s" % input_image.title)
        ff_image.slice = channel + 1
        ff_image.getProcessor().insert(ff_channel.getProcessor(), 0, 0)
        ff_channel.close()
        df_channel = WindowManager.getImage("Dark-field:%s" % input_image.title)
        df_image.slice = channel + 1
        df_image.getProcessor().insert(df_channel.getProcessor(), 0, 0)
        df_channel.close()
        print("\n\n")
    # Save the accumulated flat-field and dark-field profile images.
    template = '%s/%s-%%s.tif' % (output_dir, experiment_name)
    ff_filename = template % 'ffp'
    IJ.saveAsTiff(ff_image, ff_filename)
    ff_image.close()
    df_filename = template % 'dfp'
    IJ.saveAsTiff(df_image, df_filename)
    df_image.close()
    print "Done!"
def openfile(imagefile,
             stitchtiles=True,
             setflatres=False,
             readpylevel=0,
             setconcat=True,
             openallseries=True,
             showomexml=False,
             attach=False,
             autoscale=True,
             imageID=0):
    """Open an image file with the reader appropriate for its extension
    (CZIReader for .czi, ImageJ for JPEG, Bio-Formats otherwise) and
    return the ImagePlus together with its metadata dict.

    Parameters
    ----------
    imagefile : str
        full path to the image file
    stitchtiles : bool
        option of CZIReader to return the raw tiles as individual series
        rather than the auto-stitched images
    setflatres : bool
        flatten pyramid resolutions into separate series
    readpylevel : int
        pyramid level to read (0 = full resolution)
    setconcat : bool
        concatenate all series on import
    openallseries : bool
        open every series in the file
    showomexml : bool
        display the OME-XML during import
    attach : bool
        also read attachment images (CZI only)
    autoscale : bool
        autoscale the display range on import
    imageID : int
        OME image (series) index whose dimensions are read

    Returns
    -------
    tuple
        (ImagePlus, dict with size/scale metadata)
    """
    metainfo = {}
    # checking for the file extension
    metainfo['Extension'] = MiscTools.getextension(MiscTools.splitext_recurse(imagefile))
    # initialize the reader and get the OME metadata
    reader = ImageReader()
    omeMeta = MetadataTools.createOMEXMLMetadata()
    reader.setMetadataStore(omeMeta)
    reader.setId(imagefile)
    # bugfix: query the image count only AFTER setId -- on the still-empty
    # metadata store getImageCount() always reported 0
    metainfo['ImageCount_OME'] = omeMeta.getImageCount()
    metainfo['SeriesCount_BF'] = reader.getSeriesCount()
    reader.close()
    # read dimensions TZCXY from OME metadata
    metainfo['SizeT'] = omeMeta.getPixelsSizeT(imageID).getValue()
    metainfo['SizeZ'] = omeMeta.getPixelsSizeZ(imageID).getValue()
    metainfo['SizeC'] = omeMeta.getPixelsSizeC(imageID).getValue()
    metainfo['SizeX'] = omeMeta.getPixelsSizeX(imageID).getValue()
    metainfo['SizeY'] = omeMeta.getPixelsSizeY(imageID).getValue()
    # store info about stack: more than one z-plane means a 3D stack
    metainfo['is3d'] = metainfo['SizeZ'] > 1
    # get the scaling for XYZ; None marks an uncalibrated axis
    physSizeX = omeMeta.getPixelsPhysicalSizeX(0)
    physSizeY = omeMeta.getPixelsPhysicalSizeY(0)
    physSizeZ = omeMeta.getPixelsPhysicalSizeZ(0)
    metainfo['ScaleX'] = round(physSizeX.value(), 3) if physSizeX is not None else None
    # bugfix: ScaleY previously copied the X pixel size; use the Y size
    metainfo['ScaleY'] = round(physSizeY.value(), 3) if physSizeY is not None else None
    metainfo['ScaleZ'] = round(physSizeZ.value(), 3) if physSizeZ is not None else None
    # if image file is Carl Zeiss Image - CZI
    if metainfo['Extension'] == '.czi':
        # read the CZI file using the CZIReader
        # pylevel = 0 - read the full resolution image
        imp, metainfo = ImportTools.readCZI(imagefile, metainfo,
                                            stitchtiles=stitchtiles,
                                            setflatres=setflatres,
                                            readpylevel=readpylevel,
                                            setconcat=setconcat,
                                            openallseries=openallseries,
                                            showomexml=showomexml,
                                            attach=attach,
                                            autoscale=autoscale)
    # if image file is not Carl Zeiss Image - CZI
    else:
        # bugfix: "== ('.jpg' or '.jpeg')" evaluated to "== '.jpg'", so
        # '.jpeg' files never took the dedicated JPEG path; use membership
        if metainfo['Extension'].lower() in ('.jpg', '.jpeg'):
            # use dedicated method for jpg
            imp, metainfo = ImageTools.openjpg(imagefile, method='IJ')
        else:
            # if not jpg - use BioFormats
            imp, metainfo = ImportTools.readbf(imagefile, metainfo,
                                               setflatres=setflatres,
                                               readpylevel=readpylevel,
                                               setconcat=setconcat,
                                               openallseries=openallseries,
                                               showomexml=showomexml,
                                               autoscale=autoscale)
    return imp, metainfo
# NOTE(review): 'r' and 'filePath' are defined above this chunk -- 'r' is
# presumably an open Bio-Formats reader; confirm against the full script.
width = r.getSizeX()
height = r.getSizeY()
md = r.getGlobalMetadata()
# print(type(md))
# print(num, width, height)
# Build a single-slice stack from the first plane and display it.
stack = ImageStack(width, height)
i = 0
ip = r.openProcessors(i)[0]
stack.addSlice("1", ip);
imp = ImagePlus("foo", stack);
r.close()
imp.show()
IJ.run("Enhance Contrast", "saturated=0.35")
# Second, metadata-only pass to read the physical pixel sizes.
imageReader = ImageReader()
meta = MetadataTools.createOMEXMLMetadata()
imageReader.setMetadataStore(meta)
imageReader.setId(filePath)
pSizeX = meta.getPixelsPhysicalSizeX(0)
pSizeY = meta.getPixelsPhysicalSizeY(0)
imageReader.close()
print(pSizeX, pSizeY)
print(meta.getPixelsSizeX(0))
print(meta.getPixelsSizeY(0))
def nucleus_detection(infile, nucleus_channel, stacksize, animation):
    # Detect nucleus with 3d log filters
    # The stack is processed in chunks of 'stacksize' z-planes because
    # running TrackMate on the full stack produces too many spots at once.
    fullpath = infile
    infile = filename(infile)
    IJ.log("Start Segmentation " + str(infile))
    # First get Nb Stacks
    reader = ImageReader()
    omeMeta = MetadataTools.createOMEXMLMetadata()
    reader.setMetadataStore(omeMeta)
    reader.setId(fullpath)
    # Importer options restricted to the nucleus channel only.
    default_options = "stack_order=XYCZT color_mode=Composite view=Hyperstack specify_range c_begin=" + \
        str(nucleus_channel) + " c_end=" + str(nucleus_channel) + \
        " c_step=1 open=[" + fullpath + "]"
    NbStack = reader.getSizeZ()
    reader.close()
    # CSV output file named after the input (folder5 is a module-level path).
    output = re.sub('.ids', '.csv', infile)
    with open(os.path.join(folder5, output), 'wb') as outfile:
        DETECTwriter = csv.writer(outfile, delimiter=',')
        DETECTwriter.writerow(
            ['spotID', 'roundID', 'X', 'Y', 'Z', 'QUALITY', 'SNR', 'INTENSITY'])
    rounds = NbStack // stacksize
    spotID = 1
    for roundid in xrange(1, rounds + 2):
        # Process stacksize by stacksize otherwise crash because too many spots
        Zstart = (stacksize * roundid - stacksize + 1)
        Zend = (stacksize * roundid)
        if(Zend > NbStack):
            # last (partial) chunk: clamp to the real stack depth
            Zend = NbStack % stacksize + (roundid - 1) * stacksize
        IJ.log("Round:" + str(roundid) + ' Zstart=' + str(Zstart) +
               ' Zend=' + str(Zend) + ' out of ' + str(NbStack))
        # Load only the z-range of this round.
        IJ.run("Bio-Formats Importer", default_options + " z_begin=" +
               str(Zstart) + " z_end=" + str(Zend) + " z_step=1")
        imp = IJ.getImage()
        imp.show()
        cal = imp.getCalibration()
        model = Model()
        settings = Settings()
        settings.setFrom(imp)
        # Configure detector - Manually determined as best
        settings.detectorFactory = LogDetectorFactory()
        settings.detectorSettings = {
            'DO_SUBPIXEL_LOCALIZATION': True,
            'RADIUS': 5.5,
            'TARGET_CHANNEL': 1,
            'THRESHOLD': 50.0,
            'DO_MEDIAN_FILTERING': False,
        }
        filter1 = FeatureFilter('QUALITY', 1, True)
        settings.addSpotFilter(filter1)
        settings.addSpotAnalyzerFactory(SpotIntensityAnalyzerFactory())
        settings.addSpotAnalyzerFactory(SpotContrastAndSNRAnalyzerFactory())
        settings.trackerFactory = SparseLAPTrackerFactory()
        settings.trackerSettings = LAPUtils.getDefaultLAPSettingsMap()
        trackmate = TrackMate(model, settings)
        ok = trackmate.checkInput()
        if not ok:
            sys.exit(str(trackmate.getErrorMessage()))
        try:
            ok = trackmate.process()
        except:
            # TrackMate throws when no spots are found; log, close the
            # current image window and continue with the next round.
            IJ.log("Nothing detected, Round:" + str(roundid) + ' Zstart=' +
                   str(Zstart) + ' Zend=' + str(Zend) + ' out of ' + str(NbStack))
            IJ.selectWindow(infile)
            IJ.run('Close')
            continue
        else:
            if animation:
                # For plotting purpose only
                imp.setPosition(1, 1, imp.getNFrames())
                imp.getProcessor().setMinAndMax(0, 4000)
                selectionModel = SelectionModel(model)
                displayer = HyperStackDisplayer(model, selectionModel, imp)
                displayer.render()
                displayer.refresh()
                for i in xrange(1, imp.getNSlices() + 1):
                    imp.setSlice(i)
                    time.sleep(0.05)
            IJ.selectWindow(infile)
            IJ.run('Close')
            # Collect the per-spot features for this round.
            spots = model.getSpots()
            spotIt = spots.iterator(0, False)
            sid = []
            sroundid = []
            x = []
            y = []
            z = []
            q = []
            snr = []
            intensity = []
            for spot in spotIt:
                sid.append(spotID)
                spotID = spotID + 1
                sroundid.append(roundid)
                x.append(spot.getFeature('POSITION_X'))
                y.append(spot.getFeature('POSITION_Y'))
                q.append(spot.getFeature('QUALITY'))
                snr.append(spot.getFeature('SNR'))
                intensity.append(spot.getFeature('MEAN_INTENSITY'))
                # Correct Z position: offset by the rounds already processed
                correct_z = spot.getFeature(
                    'POSITION_Z') + (roundid - 1) * float(stacksize) * cal.pixelDepth
                z.append(correct_z)
            # Append this round's spots to the CSV.
            with open(os.path.join(folder5, output), 'ab') as outfile:
                DETECTwriter = csv.writer(outfile, delimiter=',')
                Sdata = zip(sid, sroundid, x, y, z, q, snr, intensity)
                for Srow in Sdata:
                    DETECTwriter.writerow(Srow)
# NOTE(review): this chunk belongs to a larger script; Input_File and
# Output_File are presumably script parameters bound above -- confirm.
import net.imglib2.algorithm.neighborhood.HyperSphereShape
from net.imglib2.type.numeric.real import FloatType,DoubleType
from ij.measure import ResultsTable
from net.imagej.ops import Ops
from loci.plugins.in import ImporterOptions
options = ImporterOptions()
options.setId(Input_File.getAbsolutePath())
from loci.formats import ImageReader
from loci.formats import MetadataTools
#get import ready and import
reader = ImageReader()
omeMeta = MetadataTools.createOMEXMLMetadata()
reader.setMetadataStore(omeMeta)
reader.setId(Input_File.getAbsolutePath())
seriesCount = reader.getSeriesCount()
reader.close()
#open image
imp, = BF.openImagePlus(options)
#get output path variable
outdir=Output_File.getAbsolutePath()
#get input path variable
inpu=Input_File.getAbsolutePath()
#convert to RGB
IC(imp).convertToRGB()
#show image
imp.show()
#Define ROI of whole image (basically); fixed 478x479 px rectangle --
# presumably matched to the expected input size, TODO confirm
imp.setRoi(1,1,478,479)
######OPTIONAL##############
def get_ome_metadata(source, imagenames):
    """Get the stage coordinates and calibration from the ome-xml for a given
    list of images

    Arguments:
        source {string} -- Path to the images
        imagenames {list} -- list of images filenames

    Returns:
        a tuple that contains:
        dimensions {int} -- number of dimensions (2D or 3D)
        stage_coordinates_x {list} -- the abosolute stage x-coordinates from ome-xml metadata
        stage_coordinates_y {list} -- the abosolute stage y-coordinates from ome-xml metadata
        stage_coordinates_z {list} -- the abosolute stage z-coordinates from ome-xml metadata
        relative_coordinates_x_px {list} -- the relative stage x-coordinates in px
        relative_coordinates_y_px {list} -- the relative stage y-coordinates in px
        relative_coordinates_z_px {list} -- the relative stage z-coordinates in px
        image_calibration {list} -- x,y,z image calibration in unit/px
        calibration_unit {string} -- image calibration unit
        image_dimensions_czt {list} -- number of images in dimensions c,z,t
    """
    # open an array to store the abosolute stage coordinates from metadata
    stage_coordinates_x = []
    stage_coordinates_y = []
    stage_coordinates_z = []
    for counter, image in enumerate(imagenames):
        # parse metadata
        reader = ImageReader()
        omeMeta = MetadataTools.createOMEXMLMetadata()
        reader.setMetadataStore(omeMeta)
        reader.setId(source + str(image))
        # get hyperstack dimensions from the first image; assumes all
        # images in the list share size and calibration -- TODO confirm
        if counter == 0:
            frame_size_x = reader.getSizeX()
            frame_size_y = reader.getSizeY()
            frame_size_z = reader.getSizeZ()
            frame_size_c = reader.getSizeC()
            frame_size_t = reader.getSizeT()
            # note the dimensions
            if frame_size_z == 1:
                dimensions = 2
            if frame_size_z > 1:
                dimensions = 3
            # get the physical calibration for the first image series
            physSizeX = omeMeta.getPixelsPhysicalSizeX(0)
            physSizeY = omeMeta.getPixelsPhysicalSizeY(0)
            physSizeZ = omeMeta.getPixelsPhysicalSizeZ(0)
            # workaround to get the z-interval if physSizeZ.value() returns None.
            z_interval = 1
            if physSizeZ is not None:
                z_interval = physSizeZ.value()
            if frame_size_z > 1 and physSizeZ is None:
                # recover the z-step from the distance between the recorded
                # z-positions of two consecutive z-planes
                print "no z calibration found, trying to recover"
                first_plane = omeMeta.getPlanePositionZ(0, 0)
                next_plane_imagenumber = frame_size_c + frame_size_t - 1
                second_plane = omeMeta.getPlanePositionZ(
                    0, next_plane_imagenumber)
                z_interval = abs(
                    abs(first_plane.value()) - abs(second_plane.value()))
                print "z-interval seems to be: ", z_interval
            # create an image calibration
            image_calibration = [
                physSizeX.value(), physSizeY.value(), z_interval
            ]
            calibration_unit = physSizeX.unit().getSymbol()
            image_dimensions_czt = [frame_size_c, frame_size_z, frame_size_t]
        reader.close()
        # get the plane position in calibrated units
        current_position_x = omeMeta.getPlanePositionX(0, 0)
        current_position_y = omeMeta.getPlanePositionY(0, 0)
        current_position_z = omeMeta.getPlanePositionZ(0, 0)
        # get the absolute stage positions and store them
        pos_x = current_position_x.value()
        pos_y = current_position_y.value()
        if current_position_z is None:
            # fall back to a constant z so downstream stitching still runs
            print "the z-position is missing in the ome-xml metadata."
            pos_z = 1.0
        else:
            pos_z = current_position_z.value()
        stage_coordinates_x.append(pos_x)
        stage_coordinates_y.append(pos_y)
        stage_coordinates_z.append(pos_z)
    # calculate the store the relative stage movements in px (for the grid/collection stitcher)
    relative_coordinates_x_px = []
    relative_coordinates_y_px = []
    relative_coordinates_z_px = []
    for i in range(len(stage_coordinates_x)):
        rel_pos_x = (stage_coordinates_x[i] -
                     stage_coordinates_x[0]) / physSizeX.value()
        rel_pos_y = (stage_coordinates_y[i] -
                     stage_coordinates_y[0]) / physSizeY.value()
        rel_pos_z = (stage_coordinates_z[i] -
                     stage_coordinates_z[0]) / z_interval
        relative_coordinates_x_px.append(rel_pos_x)
        relative_coordinates_y_px.append(rel_pos_y)
        relative_coordinates_z_px.append(rel_pos_z)
    return (dimensions, stage_coordinates_x, stage_coordinates_y,
            stage_coordinates_z, relative_coordinates_x_px,
            relative_coordinates_y_px, relative_coordinates_z_px,
            image_calibration, calibration_unit, image_dimensions_czt)
def run():
    """Concatenate all '*tif' files in import_dir into one OME-TIFF time series.

    Pass 1 reads the dimensions of every input file with Bio-Formats; the
    output canvas is the per-axis maximum, with T = number of input files.
    Pass 2 re-opens each file, zero-pads/centers it onto the canvas and
    streams its planes into a single Bio-Formats ImageWriter.

    Relies on script-parameter globals defined elsewhere in the file:
    import_dir, result_dir, result_name, status, io (ImageJ script params)
    -- TODO confirm against the file header, not visible in this chunk.
    """
    t_start = datetime.now()
    image_paths = glob(os.path.join(str(import_dir.getPath()), '*tif'))
    print '\tread image metadata'
    # One reader instance is reused for every file; setId() re-initializes it.
    reader = ImageReader()
    in_meta = MetadataTools.createOMEXMLMetadata()
    reader.setMetadataStore(in_meta)
    x_dims = []
    y_dims = []
    z_dims = []
    c_dims = []
    t_dims = []
    eff = []   # effective size C (number of C positions) per file
    spp = []   # samples per pixel per file
    for image_path in image_paths:
        print '\t parse %s' % (image_path)
        reader.setId(image_path)
        x_dims.append(reader.getSizeX())
        y_dims.append(reader.getSizeY())
        z_dims.append(reader.getSizeZ())
        c_dims.append(reader.getSizeC())
        t_dims.append(reader.getSizeT())
        # imageCount / Z / T = effective C; integer division (Python 2 ints).
        eff.append(reader.imageCount / z_dims[-1] / t_dims[-1])
        spp.append(reader.getSizeC() / eff[-1])
    # format/series/order are taken from whichever file was parsed last;
    # assumes all inputs share pixel type and dimension order -- TODO confirm.
    format = FormatTools.getPixelTypeString(reader.getPixelType())
    series = reader.getSeries()
    big_endian = Boolean.FALSE
    order = reader.getDimensionOrder()
    reader.close()
    # Compute the dimensions of the output file
    x_dim = max(x_dims)
    y_dim = max(y_dims)
    z_dim = max(z_dims)
    c_dim = max(c_dims)
    t_dim = max(t_dims)
    print '\t series: %i' % series
    print '\t format: %s' % format
    print '\t dimension order: %s' % order
    print '\t x: %s -> %i' % (x_dims, x_dim)
    print '\t y: %s -> %i' % (y_dims, y_dim)
    print '\t z: %s -> %i' % (z_dims, z_dim)
    print '\t c: %s -> %i' % (c_dims, c_dim)
    print '\t t: %s -> %i' % (t_dims, t_dim)
    print '\t effective size c: %s' % eff
    print '\t samples per pixel: %s' % spp
    # Get the time dimension from the number of input files
    # (each input file becomes one time point; overrides the max above).
    t_dim = len(image_paths)
    # TODO: Tried to work out the order with Axes class, got something weird though.
    # NOTE(review): 'dimensions' is assigned but never read below -- looks dead.
    dimensions = [Short(x_dim), Short(y_dim), Short(c_dim), Short(z_dim)]
    pixels_per_plane = x_dim * y_dim
    # Assemble the metadata for the output file
    out_meta = MetadataTools.createOMEXMLMetadata()
    out_meta.setImageID(MetadataTools.createLSID('Image', series), series)
    out_meta.setPixelsID(MetadataTools.createLSID('Pixels', series), series)
    # NOTE(review): metadata declares big-endian pixel data here, but the
    # planes below are packed with big_endian = Boolean.FALSE -- confirm
    # these are meant to disagree (TiffSaver may ignore BinData endianness).
    out_meta.setPixelsBinDataBigEndian(Boolean.TRUE, 0, 0)
    out_meta.setPixelsDimensionOrder(DimensionOrder.fromString(order), series)
    out_meta.setPixelsType(PixelType.fromString(format), series)
    out_meta.setPixelsSizeX(PositiveInteger(x_dim), series)
    out_meta.setPixelsSizeY(PositiveInteger(y_dim), series)
    out_meta.setPixelsSizeZ(PositiveInteger(z_dim), series)
    out_meta.setPixelsSizeC(PositiveInteger(c_dim), series)
    out_meta.setPixelsSizeT(PositiveInteger(t_dim), series)
    for c in range(c_dim):
        out_meta.setChannelID(MetadataTools.createLSID('Channel', series, c),
                              series, c)
        out_meta.setChannelSamplesPerPixel(PositiveInteger(1), series, c)
    # Initialize the BF writer
    result_path = os.path.join(result_dir.getPath(), result_name)
    writer = ImageWriter()
    writer.setMetadataRetrieve(out_meta)
    writer.setId(result_path)
    print '\tcreated to %s' % (result_path)
    # Write the stacks into the output file
    N = len(image_paths)
    for i, image_path in enumerate(image_paths):
        status.showStatus(i, N, "catenating %i of %i time-points" % (i, N))
        print '\t processing %s' % (image_path)
        ds = io.open(image_path)
        # Query this dataset's axis layout (indices differ per file).
        xi = ds.dimensionIndex(Axes.X)
        xv = ds.dimension(xi)
        yi = ds.dimensionIndex(Axes.Y)
        yv = ds.dimension(yi)
        zi = ds.dimensionIndex(Axes.Z)
        zv = ds.dimension(zi)
        ti = ds.dimensionIndex(Axes.TIME)
        tv = ds.dimension(ti)
        ci = ds.dimensionIndex(Axes.CHANNEL)
        cv = ds.dimension(ci)
        # Center the (possibly smaller) stack on the output canvas.
        dx = float(x_dim - xv) / 2.0
        dy = float(y_dim - yv) / 2.0
        dz = float(z_dim - zv) / 2.0
        print '\t translation vector (dx, dy, dz) = (%f, %f, %f)' % (
            dx, dy, dz)
        if (dx != 0) or (dy != 0) or (dz != 0):
            # Translation vector order is (X, Y, C, Z); C is never shifted.
            # Assumes the dataset's axes come in X,Y,C,Z order -- TODO confirm.
            stk = Views.translate(ds, long(dx), long(dy), long(0), long(dz))
            stk = Views.extendZero(stk)
        else:
            stk = Views.extendZero(ds.getImgPlus().getImg())
        print '\t writing planes ',
        n = 0
        plane = 1
        byte_array = []
        # NOTE(review): 'plane' restarts at 1 for every input file, so
        # saveBytes() reuses indices 0..k-1 across files -- verify the writer
        # appends rather than overwrites earlier time points.
        interval_view = Views.interval(stk,
            [Long(0), Long(0), Long(0), Long(0)],
            [Long(x_dim - 1), Long(y_dim - 1), Long(c_dim - 1), Long(z_dim - 1)])
        cursor = interval_view.cursor()
        while cursor.hasNext():
            n += 1
            cursor.fwd()
            # Pack each sample as a 2-byte little-endian short
            # (big_endian was set to Boolean.FALSE above).
            value = cursor.get().getInteger()
            bytes = DataTools.shortToBytes(value, big_endian)
            byte_array.extend(bytes)
            if n == pixels_per_plane:
                # Flush one complete XY plane to the writer (0-based index).
                writer.saveBytes(plane - 1, byte_array)
                print '.',
                if ((plane) % 10) == 0:
                    print '\n\t ',
                byte_array = []
                plane += 1
                n = 0
        print ' '
    writer.close()
    t = datetime.now() - t_start
    print '\twrote %i planes to %s in %i sec.' % (plane - 1, result_path,
                                                  t.total_seconds())
    print '... done.'
def readZeissHeader(self, infoStr):
    """Extract acquisition date/time and voxel sizes from a Zeiss file.

    LSM: date/time via Bio-Formats OME metadata; voxel sizes, channel
    count and zoom parsed out of infoStr ('key=value' lines).
    CZI: date/time parsed from infoStr; voxel sizes taken from the
    ImagePlus calibration.

    Sets on self: dateStr, timeStr, voxelx/y/z, numChannels, zoom,
    and (CZI only) clears self.infoStr.  Reads self.islsm, self.isczi,
    self.filepath, self.imp.  Uses module helpers bFixDate/bFixTime/
    bPrintLog defined elsewhere in this file.
    """
    # This is incredibly difficult to get working as (date, time, voxels)
    # are in different obscure places in lsm and czi.
    # Furthermore, just trying to read the raw ome xml is futile:
    # parsing ome xml as a string and searching it with regular
    # expressions (re) does not work, and there is little documentation.
    #
    # get and parse xml to find date/time
    #fi = self.imp.getOriginalFileInfo();  # returns a FileInfo object
    #omexml = fi.description  # omexml is a string
    #omexml = omexml.encode('utf-8')
    #omexml = omexml.replaceAll("[^\\x20-\\x7e]", "")
    # see: https://stackoverflow.com/questions/2599919/java-parsing-xml-document-gives-content-not-allowed-in-prolog-error
    # (1) try and search the ome xml like a string, this gives errors
    #docsPattern = '<AcquisitionDate>.*</AcquisitionDate>'
    #searchresult = re.search(docsPattern, omexml)
    #print 'searchresult:', searchresult.group(0)
    # (2) treat the ome xml like any other xml -- raises errors too
    #omexml has <AcquisitionDate>2016-08-17T15:21:50</AcquisitionDate>
    #import xml.etree.ElementTree
    #e = xml.etree.ElementTree.fromstring(omexml).getroot()
    #print omexml
    #for atype in e.findall('AcquisitionDate'):
    #    print 'AcquisitionDate:', atype  #.get('foobar')
    #
    if self.islsm:
        # lsm have date hidden in omeMeta.getImageAcquisitionDate(0)
        # this is copied from code at: https://gist.github.com/ctrueden/6282856
        reader = ImageReader()
        omeMeta = MetadataTools.createOMEXMLMetadata()
        #omeMeta.getImageAcquisitionDate(0)
        reader.setMetadataStore(omeMeta)
        reader.setId(self.filepath)
        #seriesCount = reader.getSeriesCount()
        dateTimeStr = omeMeta.getImageAcquisitionDate(0)  #2016-08-17T16:36:26
        reader.close()
        if dateTimeStr:
            # ISO-ish 'date T time' -> separate date and time strings.
            self.dateStr, self.timeStr = dateTimeStr.toString().split('T')
            self.dateStr = bFixDate(self.dateStr)
            self.timeStr = bFixTime(self.timeStr)
            #bPrintLog('LSM date/time is: ' + self.dateStr + ' ' + self.timeStr, 3)
        else:
            bPrintLog('WARNING: did not get Zeiss date/time string')
        # lsm have voxels in infoStr
        # NOTE(review): find() matches substrings, so e.g. 'SizeC' would also
        # match longer keys containing it -- assumes infoStr keys are exact.
        for line in infoStr.split('\n'):
            #print line
            if line.find('VoxelSizeX') != -1:
                self.voxelx = float(line.split('=')[1])
            if line.find('VoxelSizeY') != -1:
                self.voxely = float(line.split('=')[1])
            if line.find('VoxelSizeZ') != -1:
                self.voxelz = float(line.split('=')[1])
            if line.find('SizeC') != -1:
                self.numChannels = int(line.split('=')[1])
            #if line.find('BitsPerPixel') and not line.startswith('Experiment') != -1:  # 20170811, startswith is for czi
            #    self.bitsPerPixel = int(line.split('=')[1])
            if line.find('RecordingZoomX#1') != -1:
                self.zoom = int(line.split('=')[1])
    if self.isczi:
        # czi has date/time in infoStr (lsm does not)
        for line in infoStr.split('\n'):
            if line.find('CreationDate #1') != -1:  # w.t.f. is #1 referring to?
                lhs, rhs = line.split('=')
                rhs = rhs.replace('  ', ' ')
                if rhs.startswith(' '):
                    # NOTE(review): [1:-1] drops the trailing character as
                    # well as the leading space -- confirm that is intended
                    # (perhaps stripping a trailing newline).
                    rhs = rhs[1:-1]
                #print "lhs: '" + lhs + "'" + "rhs: '" + rhs + "'"
                # Value may be 'dateTtime' or 'date time'.
                if rhs.find('T') != -1:
                    self.dateStr, self.timeStr = rhs.split('T')
                else:
                    self.dateStr, self.timeStr = rhs.split(' ')
                self.dateStr = bFixDate(self.dateStr)
                self.timeStr = bFixTime(self.timeStr)
                #bPrintLog('CZI date/time is: ' + self.dateStr + ' ' + self.timeStr, 3)
        # .czi
        # <Pixels BigEndian="false" DimensionOrder="XYCZT" ID="Pixels:0" Interleaved="false" PhysicalSizeX="0.20756645602494875" PhysicalSizeXUnit="µm" PhysicalSizeY="0.20756645602494875" PhysicalSizeYUnit="µm" PhysicalSizeZ="0.75" PhysicalSizeZUnit="µm" SignificantBits="8" SizeC="1" SizeT="1" SizeX="1024" SizeY="1024" SizeZ="50" Type="uint8">
        # czi have voxel in calibration
        self.voxelx = self.imp.getCalibration().pixelWidth;
        self.voxely = self.imp.getCalibration().pixelHeight;
        self.voxelz = self.imp.getCalibration().pixelDepth;
        #bPrintLog('readZeissHeader() read czi scale as: ' + str(self.voxelx) + ' ' + str(self.voxely) + ' ' + str(self.voxelz), 3)
        # CLEARING self.infoStr for CZI ... it was WAY too big to parse in Map Manager
        self.infoStr = ''
def process_time_points(root, files, outdir): '''Concatenate images and write ome.tiff file. If image contains already multiple time points just copy the image''' concat = 1 files.sort() options = ImporterOptions() options.setId(files[0]) options.setVirtual(1) image = BF.openImagePlus(options) image = image[0] if image.getNFrames() > 1: IJ.log(files[0] + " Contains multiple time points. Can only concatenate single time points! Don't do anything!") image.close() return width = image.getWidth() height = image.getHeight() for patt in pattern: outName = re.match(patt, os.path.basename(files[0])) if outName is None: continue if outdir is None: outfile = os.path.join(root, outName.group(1) + '.ome.tif') else: outfile = os.path.join(outdir, outName.group(1) + '.ome.tif') reader = ImageReader() reader.setMetadataStore(MetadataTools.createOMEXMLMetadata()) reader.setId(files[0]) timeInfo = [] omeOut = reader.getMetadataStore() omeOut = setUpXml(omeOut, image, files) reader.close() image.close() IJ.log ('Concatenates ' + os.path.join(root, outName.group(1) + '.ome.tif')) itime = 0 try: for ifile, fileName in enumerate(files): print fileName omeMeta = MetadataTools.createOMEXMLMetadata() reader.setMetadataStore(omeMeta) reader.setId(fileName) #print omeMeta.getPlaneDeltaT(0,0) #print omeMeta.getPixelsTimeIncrement(0) if fileName.endswith('.czi'): if ifile == 0: T0 = omeMeta.getPlaneDeltaT(0,0).value() dT = omeMeta.getPlaneDeltaT(0,0).value() - T0 unit = omeMeta.getPlaneDeltaT(0,0).unit() else: timeInfo.append(getTimePoint(reader, omeMeta)) unit = omeMeta.getPixelsTimeIncrement(0).unit() try: dT = round(timeInfo[files.index(fileName)]-timeInfo[0],2) except: dT = (timeInfo[files.index(fileName)]-timeInfo[0]).seconds nrImages = reader.getImageCount() for i in range(0, reader.getImageCount()): try: omeOut.setPlaneDeltaT(dT, 0, i + itime*nrImages) except TypeError: omeOut.setPlaneDeltaT(Time(dT, unit),0, i + itime*nrImages) omeOut.setPlanePositionX(omeOut.getPlanePositionX(0,i), 
0, i + itime*nrImages) omeOut.setPlanePositionY(omeOut.getPlanePositionY(0,i), 0, i + itime*nrImages) omeOut.setPlanePositionZ(omeOut.getPlanePositionZ(0,i), 0, i + itime*nrImages) omeOut.setPlaneTheC(omeOut.getPlaneTheC(0,i), 0, i + itime*nrImages) omeOut.setPlaneTheT(NonNegativeInteger(itime), 0, i + itime*nrImages) omeOut.setPlaneTheZ(omeOut.getPlaneTheZ(0,i), 0, i + itime*nrImages) itime = itime + 1 reader.close() IJ.showProgress(files.index(fileName), len(files)) try: incr = float(dT/(len(files)-1)) except: incr = 0 try: omeOut.setPixelsTimeIncrement(incr, 0) except TypeError: #new Bioformats >5.1.x omeOut.setPixelsTimeIncrement(Time(incr, unit),0) outfile = concatenateImagePlus(files, outfile) if outfile is not None: filein = RandomAccessInputStream(outfile) fileout = RandomAccessOutputStream(outfile) saver = TiffSaver(fileout, outfile) saver.overwriteComment(filein,omeOut.dumpXML()) fileout.close() filein.close() except: traceback.print_exc() finally: #close all possible open files try: reader.close() except: pass try: filein.close() except: pass try: fileout.close() except: