def processFile(filename, inDir, outDir, dichroics, mergeList):
    """Apply per-channel dichroic-mirror offset correction to one .oir file.

    Reads the image metadata with Bio-Formats, translates every channel by
    its (possibly rotation-corrected) dichroic offset, saves one tiff per
    channel and -- when mergeList is given -- also writes a merged stack plus
    a max-intensity projection, then removes the temporary per-channel tiffs.

    Arguments:
        filename  -- name of the input file (with extension)
        inDir     -- input directory path (with trailing separator)
        outDir    -- output directory path (with trailing separator)
        dichroics -- one dichroic-mirror name ("DM1".."DM5") per channel
        mergeList -- list mapping colour slot -> channel index (None entries
                     are skipped); pass None to disable merging

    NOTE(review): relies on module-level helpers/globals defined elsewhere in
    this file (extractChannel, getOffset, rotateOffset, readSingleChannelImg,
    dichroicDict, and the ImageJ/Bio-Formats imports).
    """
    # A mergeList is the only signal that merged output was requested.
    merge = mergeList is not None

    filenameExExt = os.path.splitext(filename)[0]
    filepath = inDir + filename

    # parse metadata
    reader = ImageReader()
    omeMeta = MetadataTools.createOMEXMLMetadata()
    reader.setMetadataStore(omeMeta)
    reader.setId(filepath)
    numChannels = reader.getSizeC()
    numSlices = reader.getSizeZ()
    numFrames = reader.getSizeT()
    seriesCount = reader.getSeriesCount()
    globalMetadata = reader.getGlobalMetadata()
    seriesMetadata = reader.getSeriesMetadata()
    objLensName = globalMetadata['- Objective Lens name #1']
    # Total rotation = FOV rotation plus an optional ROI rotation.
    areaRotation = float(seriesMetadata['area rotation #1'])
    acquisitionValueRotation = float(seriesMetadata['acquisitionValue rotation #1'])
    if 'regionInfo rotation #1' in seriesMetadata:
        regionInfoRotation = float(seriesMetadata['regionInfo rotation #1'])
    else:
        regionInfoRotation = float(0)
    totalRotation = areaRotation + regionInfoRotation
    physSizeX = omeMeta.getPixelsPhysicalSizeX(0)
    physSizeY = omeMeta.getPixelsPhysicalSizeY(0)
    pxSizeX = physSizeX.value(UNITS.MICROM)
    pxSizeY = physSizeY.value(UNITS.MICROM)

    # log metadata
    IJ.log("\nMETADATA")
    #IJ.log("Filename: " + filepath)
    IJ.log("Number of series: " + str(seriesCount))
    IJ.log("Number of channels: " + str(numChannels))
    IJ.log("Number of frames: " + str(numFrames))
    IJ.log("Number of slices: " + str(numSlices))
    IJ.log("Objective lens: " + objLensName)
    IJ.log("FOV rotation: " + str(areaRotation))
    IJ.log("ROI rotation: " + str(regionInfoRotation))
    IJ.log("Total rotation: " + str(totalRotation))
    IJ.log("Pixel size:")
    IJ.log("\t\tX = " + str(physSizeX.value()) + " " + physSizeX.unit().getSymbol())
    IJ.log("\t\tY = " + str(physSizeY.value()) + " " + physSizeY.unit().getSymbol())

    if merge:
        # Hidden, timestamped temporary folder; the per-channel tiffs written
        # into it are deleted again after merging.
        tifDir = outDir + "." + str(datetime.now()).replace(" ", "").replace(":", "") + "/"
        if not os.path.exists(tifDir):
            os.makedirs(tifDir)
            IJ.log("\nCreated temporary folder: " + tifDir + "\n")
        else:
            IJ.log("Unable to create temporary folder!\n")
    else:
        tifDir = outDir + filenameExExt + "/"
        if not os.path.exists(tifDir):
            os.makedirs(tifDir)
            IJ.log("\nCreated subfolder: " + tifDir + "\n")
        else:
            IJ.log("\nSubfolder " + tifDir + " already exists.\n")

    # correct images
    tifFilePaths = []
    for i in range(numChannels):
        ip = extractChannel(oirFile=filepath, ch=i)
        if dichroics[i] == "DM1":
            # DM1 is the reference mirror: no offset to compensate.
            IJ.log("Channel " + str(i+1) + " was imaged using DM1, so no correction required.")
        else:
            offsets = getOffset(obj=objLensName, dm=dichroicDict[dichroics[i]])
            xom = offsets['x']
            yom = offsets['y']
            if abs(totalRotation) > 0.1:
                # Rotate the offset vector into the acquisition frame.
                rotOff = rotateOffset(x=xom, y=yom, angle=-totalRotation)
                xom = rotOff['x']
                yom = rotOff['y']
            # micrometre offsets -> integer pixel offsets
            xop = int(round(xom / pxSizeX))
            yop = int(round(yom / pxSizeY))
            IJ.log("Channel " + str(i+1) + " offsets")
            IJ.log("\t\tMicrometres")
            IJ.log("\t\t\t\tx = " + str(xom))
            IJ.log("\t\t\t\ty = " + str(yom))
            IJ.log("\t\tPixels")
            IJ.log("\t\t\t\tx = " + str(xop))
            IJ.log("\t\t\t\ty = " + str(yop))
            IJ.run(ip, "Translate...", "x=" + str(-xop) + " y=" + str(-yop) + " interpolation=None stack")
        tifFilePath = tifDir + filenameExExt + "_ch_" + str(i+1) + ".tif"
        tifFilePaths.append(tifFilePath)
        if os.path.exists(tifFilePath):
            IJ.log("\nOutput file exists: " + tifFilePath)
            IJ.log("Rerun plugin choosing a different output folder")
            IJ.log("or delete file and then rerun plugin.")
            IJ.log("Image processing terminated!\n")
            return
        FileSaver(ip).saveAsTiff(tifFilePath)

    if merge:
        max_list = []
        for i in range(len(mergeList)):
            if mergeList[i] is not None:
                # Replace the channel index by the loaded channel image and
                # build its max-intensity projection for the "_max" output.
                mergeList[i] = readSingleChannelImg(tifFilePaths[mergeList[i]])
                channel = mergeList[i]
                projector = ZProjector(channel)
                projector.setMethod(ZProjector.MAX_METHOD)
                projector.doProjection()
                max_list.append(projector.getProjection())
        merged = RGBStackMerge.mergeChannels(mergeList, False)
        merged_max = RGBStackMerge.mergeChannels(max_list, False)
        mergedChannelFilepath = outDir + filenameExExt + ".tif"
        maxMergedChannelFilepath = outDir + filenameExExt + "_max.tif"
        if os.path.exists(mergedChannelFilepath):
            IJ.log("\nOutput file exists: " + mergedChannelFilepath)
            IJ.log("Rerun plugin choosing a different output folder")
            IJ.log("or delete file and then rerun plugin.")
            IJ.log("Image processing terminated!\n")
            # BUG FIX: previously fell through and overwrote the existing file
            # even though the log claimed processing was terminated (the
            # per-channel check above already returns here).
            return
        FileSaver(merged).saveAsTiff(mergedChannelFilepath)
        FileSaver(merged_max).saveAsTiff(maxMergedChannelFilepath)
        # Clean up the temporary per-channel tiffs and their folder.
        for tf in tifFilePaths:
            os.remove(tf)
        os.rmdir(tifDir)

    IJ.log("\nFinished processing file:\n" + filepath + "\n")
    if merge:
        IJ.log("Image file with channels aligned:\n" + outDir + filenameExExt + ".tif\n")
    else:
        IJ.log("Aligned images (one tiff file for each channel) can be found in:\n" + tifDir + "\n")
def processFile():
    """Interactive Olympus dichroic-mirror correction.

    Asks the user for an .oir file, the dichroic mirror used for each
    channel, an optional channel-to-colour merge assignment and an output
    folder, then applies the per-channel dichroic offset correction and
    saves the corrected tiff(s).

    NOTE(review): this zero-argument function has the same name as the
    five-argument processFile defined earlier in this file -- whichever is
    defined later shadows the other; consider renaming one of them.
    NOTE(review): relies on module-level helpers/globals defined elsewhere
    (pluginVersion, extractChannel, getOffset, rotateOffset,
    readSingleChannelImg, dichroicDict, and ImageJ/Bio-Formats imports).
    """
    # start logging
    IJ.log("\n______________________________\n\n\t\tOlympus DM correction\n\t\tVersion " + pluginVersion + "\n______________________________\n")

    # ask user for file
    ofd = OpenDialog("Choose a file", None)
    filename = ofd.getFileName()
    if filename is None:
        IJ.log("User canceled the dialog!\nImage processing canceled!\n")
        return
    directory = ofd.getDirectory()
    filepath = directory + filename
    IJ.log("File path: " + filepath)
    if not filename.endswith(".oir"):
        IJ.log("Not an Olympus (.oir) file.\nNo image to process.\n")
        return
    filenameExExt = os.path.splitext(filename)[0]

    # parse metadata
    reader = ImageReader()
    omeMeta = MetadataTools.createOMEXMLMetadata()
    reader.setMetadataStore(omeMeta)
    reader.setId(filepath)
    numChannels = reader.getSizeC()
    numSlices = reader.getSizeZ()
    numFrames = reader.getSizeT()
    seriesCount = reader.getSeriesCount()
    globalMetadata = reader.getGlobalMetadata()
    seriesMetadata = reader.getSeriesMetadata()
    objLensName = globalMetadata['- Objective Lens name #1']
    # Total rotation = FOV rotation plus an optional ROI rotation.
    areaRotation = float(seriesMetadata['area rotation #1'])
    acquisitionValueRotation = float(seriesMetadata['acquisitionValue rotation #1'])
    if 'regionInfo rotation #1' in seriesMetadata:
        regionInfoRotation = float(seriesMetadata['regionInfo rotation #1'])
    else:
        regionInfoRotation = float(0)
    totalRotation = areaRotation + regionInfoRotation
    physSizeX = omeMeta.getPixelsPhysicalSizeX(0)
    physSizeY = omeMeta.getPixelsPhysicalSizeY(0)
    pxSizeX = physSizeX.value(UNITS.MICROM)
    pxSizeY = physSizeY.value(UNITS.MICROM)

    # log metadata
    IJ.log("\nMETADATA")
    #IJ.log("Filename: " + filepath)
    IJ.log("Number of series: " + str(seriesCount))
    IJ.log("Number of channels: " + str(numChannels))
    IJ.log("Number of frames: " + str(numFrames))
    IJ.log("Number of slices: " + str(numSlices))
    IJ.log("Objective lens: " + objLensName)
    IJ.log("FOV rotation: " + str(areaRotation))
    IJ.log("ROI rotation: " + str(regionInfoRotation))
    IJ.log("Total rotation: " + str(totalRotation))
    IJ.log("Pixel size:")
    IJ.log("\t\tX = " + str(physSizeX.value()) + " " + physSizeX.unit().getSymbol())
    IJ.log("\t\tY = " + str(physSizeY.value()) + " " + physSizeY.unit().getSymbol())

    # ask user to identify dichroic mirror used for each channel
    gdDM = GenericDialog("Dichroic mirrors")
    DMs = ["DM1", "DM2", "DM3", "DM4", "DM5"]
    for i in range(numChannels):
        gdDM.addChoice("Channel " + str(i+1), DMs, DMs[0])
    gdDM.addCheckbox("Merge channels", False)
    gdDM.showDialog()
    if gdDM.wasCanceled():
        IJ.log("User canceled the dialog!\nImage processing canceled!\n")
        return
    dichroics = []
    for i in range(numChannels):
        dichroics.append(gdDM.getNextChoice())
    merge = gdDM.getNextBoolean()
    IJ.log("\nUser selected dichroic mirrors")
    for i in range(numChannels):
        IJ.log("\t\tChannel " + str(i+1) + ": " + dichroics[i])

    if merge:
        # ask user which channel goes into which colour slot
        channels = []
        chDict = {}
        for i in range(numChannels):
            chName = "Channel" + str(i+1)
            channels.append(chName)
            chDict[chName] = i
        channels.append("NONE")
        colourChoices = ["red", "green", "blue", "gray", "cyan", "magenta", "yellow"]
        gdMerge = GenericDialog("Merge channels")
        for c in colourChoices:
            # channels[numChannels] is "NONE", i.e. every slot defaults off
            gdMerge.addChoice(c + ":", channels, channels[numChannels])
        gdMerge.showDialog()
        if gdMerge.wasCanceled():
            IJ.log("User canceled the dialog!\nImage processing canceled!\n")
            return
        IJ.log("\nUser selected channel colours")
        mergeList = []
        for i in range(len(colourChoices)):
            ch = gdMerge.getNextChoice()
            if ch == "NONE":
                mergeList.append(None)
            else:
                mergeList.append(chDict[ch])
            IJ.log("\t\t" + colourChoices[i] + ": " + ch)

    # ask user for an output directory
    dc = DirectoryChooser("Choose folder for output")
    od = dc.getDirectory()
    if od is None:
        IJ.log("User canceled the dialog!\nImage processing canceled!\n")
        return

    if merge:
        # Hidden, timestamped temporary folder; per-channel tiffs are
        # deleted again after merging.
        tifDir = od + "." + str(datetime.now()).replace(" ", "").replace(":", "") + "/"
        if not os.path.exists(tifDir):
            os.makedirs(tifDir)
            IJ.log("\nCreated temporary folder: " + tifDir + "\n")
        else:
            IJ.log("Unable to create temporary folder!\n")
    else:
        tifDir = od + filenameExExt + "/"
        if not os.path.exists(tifDir):
            os.makedirs(tifDir)
            IJ.log("\nCreated subfolder: " + tifDir + "\n")
        else:
            IJ.log("\nSubfolder " + tifDir + " already exists")

    # correct images
    tifFilePaths = []
    for i in range(numChannels):
        ip = extractChannel(oirFile=filepath, ch=i)
        if dichroics[i] == "DM1":
            # DM1 is the reference mirror: no offset to compensate.
            IJ.log("Channel " + str(i+1) + " was imaged using DM1, so no correction required.")
        else:
            offsets = getOffset(obj=objLensName, dm=dichroicDict[dichroics[i]])
            xom = offsets['x']
            yom = offsets['y']
            if abs(totalRotation) > 0.1:
                # Rotate the offset vector into the acquisition frame.
                rotOff = rotateOffset(x=xom, y=yom, angle=-totalRotation)
                xom = rotOff['x']
                yom = rotOff['y']
            # micrometre offsets -> integer pixel offsets
            xop = int(round(xom / pxSizeX))
            yop = int(round(yom / pxSizeY))
            IJ.log("Channel " + str(i+1) + " offsets")
            IJ.log("\t\tMicrometres")
            IJ.log("\t\t\t\tx = " + str(xom))
            IJ.log("\t\t\t\ty = " + str(yom))
            IJ.log("\t\tPixels")
            IJ.log("\t\t\t\tx = " + str(xop))
            IJ.log("\t\t\t\ty = " + str(yop))
            IJ.run(ip, "Translate...", "x=" + str(-xop) + " y=" + str(-yop) + " interpolation=None stack")
        tifFilePath = tifDir + filenameExExt + "_ch_" + str(i+1) + ".tif"
        tifFilePaths.append(tifFilePath)
        if os.path.exists(tifFilePath):
            IJ.log("\nOutput file exists: " + tifFilePath)
            IJ.log("Rerun plugin choosing a different output folder")
            IJ.log("or delete file and then rerun plugin.")
            IJ.log("Image processing terminated!\n")
            return
        FileSaver(ip).saveAsTiff(tifFilePath)

    if merge:
        for i in range(len(mergeList)):
            if mergeList[i] is not None:
                # Replace the channel index by the loaded channel image.
                mergeList[i] = readSingleChannelImg(tifFilePaths[mergeList[i]])
        merged = RGBStackMerge.mergeChannels(mergeList, False)
        mergedChannelFilepath = od + filenameExExt + ".tif"
        if os.path.exists(mergedChannelFilepath):
            IJ.log("\nOutput file exists: " + mergedChannelFilepath)
            IJ.log("Rerun plugin choosing a different output folder")
            IJ.log("or delete file and then rerun plugin.")
            IJ.log("Image processing terminated!\n")
            # BUG FIX: previously fell through and overwrote the existing
            # file even though the log claimed processing was terminated.
            return
        FileSaver(merged).saveAsTiff(mergedChannelFilepath)
        # Clean up the temporary per-channel tiffs and their folder.
        for tf in tifFilePaths:
            os.remove(tf)
        os.rmdir(tifDir)

    IJ.log("\nFinished processing file:\n" + filepath + "\n")
    if merge:
        IJ.log("Image file with channels aligned:\n" + od + filenameExExt + ".tif\n")
    else:
        IJ.log("Aligned images (one tiff file for each channel) can be found in:\n" + tifDir + "\n")
def get_ome_metadata(source, imagenames):
    """Get the stage coordinates and calibration from the ome-xml for a given
    list of images.

    Arguments:
        source {string} -- Path to the images
        imagenames {list} -- list of image filenames

    Returns:
        a tuple that contains:
        dimensions {int} -- number of dimensions (2D or 3D)
        stage_coordinates_x {list} -- the absolute stage x-coordinates from ome-xml metadata
        stage_coordinates_y {list} -- the absolute stage y-coordinates from ome-xml metadata
        stage_coordinates_z {list} -- the absolute stage z-coordinates from ome-xml metadata
        relative_coordinates_x_px {list} -- the relative stage x-coordinates in px
        relative_coordinates_y_px {list} -- the relative stage y-coordinates in px
        relative_coordinates_z_px {list} -- the relative stage z-coordinates in px
        image_calibration {list} -- x,y,z image calibration in unit/px
        calibration_unit {string} -- image calibration unit
        image_dimensions_czt {list} -- number of images in dimensions c,z,t
    """
    # open an array to store the absolute stage coordinates from metadata
    stage_coordinates_x = []
    stage_coordinates_y = []
    stage_coordinates_z = []

    for counter, image in enumerate(imagenames):
        # parse metadata (a fresh Bio-Formats reader per image)
        reader = ImageReader()
        omeMeta = MetadataTools.createOMEXMLMetadata()
        reader.setMetadataStore(omeMeta)
        reader.setId(source + str(image))

        # get hyperstack dimensions from the first image only; all later
        # images are assumed to share the same dimensions and calibration
        if counter == 0:
            frame_size_x = reader.getSizeX()
            frame_size_y = reader.getSizeY()
            frame_size_z = reader.getSizeZ()
            frame_size_c = reader.getSizeC()
            frame_size_t = reader.getSizeT()

            # note the dimensions
            if frame_size_z == 1:
                dimensions = 2
            if frame_size_z > 1:
                dimensions = 3

            # get the physical calibration for the first image series
            physSizeX = omeMeta.getPixelsPhysicalSizeX(0)
            physSizeY = omeMeta.getPixelsPhysicalSizeY(0)
            physSizeZ = omeMeta.getPixelsPhysicalSizeZ(0)

            # workaround to get the z-interval if physSizeZ.value() returns None.
            z_interval = 1
            if physSizeZ is not None:
                z_interval = physSizeZ.value()

            if frame_size_z > 1 and physSizeZ is None:
                # recover the z-step from the distance between the first two
                # z-planes recorded in the per-plane metadata
                print "no z calibration found, trying to recover"
                first_plane = omeMeta.getPlanePositionZ(0, 0)
                # NOTE(review): assumes the next z-plane sits at plane index
                # c + t - 1 -- verify this for other dimension orders
                next_plane_imagenumber = frame_size_c + frame_size_t - 1
                second_plane = omeMeta.getPlanePositionZ(
                    0, next_plane_imagenumber)
                z_interval = abs(
                    abs(first_plane.value()) - abs(second_plane.value()))
                print "z-interval seems to be: ", z_interval

            # create an image calibration
            image_calibration = [
                physSizeX.value(), physSizeY.value(), z_interval
            ]
            calibration_unit = physSizeX.unit().getSymbol()
            image_dimensions_czt = [frame_size_c, frame_size_z, frame_size_t]

        reader.close()

        # get the plane position in calibrated units
        current_position_x = omeMeta.getPlanePositionX(0, 0)
        current_position_y = omeMeta.getPlanePositionY(0, 0)
        current_position_z = omeMeta.getPlanePositionZ(0, 0)

        # get the absolute stage positions and store them
        pos_x = current_position_x.value()
        pos_y = current_position_y.value()

        if current_position_z is None:
            # fall back to a constant z when the metadata has no z-position
            print "the z-position is missing in the ome-xml metadata."
            pos_z = 1.0
        else:
            pos_z = current_position_z.value()

        stage_coordinates_x.append(pos_x)
        stage_coordinates_y.append(pos_y)
        stage_coordinates_z.append(pos_z)

    # calculate the store the relative stage movements in px (for the
    # grid/collection stitcher); positions are relative to the first image
    relative_coordinates_x_px = []
    relative_coordinates_y_px = []
    relative_coordinates_z_px = []
    for i in range(len(stage_coordinates_x)):
        rel_pos_x = (stage_coordinates_x[i]
                     - stage_coordinates_x[0]) / physSizeX.value()
        rel_pos_y = (stage_coordinates_y[i]
                     - stage_coordinates_y[0]) / physSizeY.value()
        rel_pos_z = (stage_coordinates_z[i]
                     - stage_coordinates_z[0]) / z_interval
        relative_coordinates_x_px.append(rel_pos_x)
        relative_coordinates_y_px.append(rel_pos_y)
        relative_coordinates_z_px.append(rel_pos_z)

    return (dimensions, stage_coordinates_x, stage_coordinates_y,
            stage_coordinates_z, relative_coordinates_x_px,
            relative_coordinates_y_px, relative_coordinates_z_px,
            image_calibration, calibration_unit, image_dimensions_czt)
def run():
    """Concatenate all '*tif' stacks in import_dir into one multi-timepoint file.

    Every input file becomes one time-point of a single output written to
    result_dir/result_name via the Bio-Formats ImageWriter; smaller stacks
    are centred and zero-extended up to the maximum x/y/z dimensions found
    across the inputs.

    NOTE(review): relies on script-parameter globals provided by the
    scripting environment (import_dir, result_dir, result_name, status, io);
    pixel data is converted with DataTools.shortToBytes, i.e. assumes a
    16-bit pixel type -- verify for other input formats.
    """
    t_start = datetime.now()
    image_paths = glob(os.path.join(str(import_dir.getPath()), '*tif'))

    # Collect the dimensions of every input image.
    print '\tread image metadata'
    reader = ImageReader()
    in_meta = MetadataTools.createOMEXMLMetadata()
    reader.setMetadataStore(in_meta)
    x_dims = []
    y_dims = []
    z_dims = []
    c_dims = []
    t_dims = []
    eff = []  # effective size c: image count per z*t
    spp = []  # samples per pixel
    for image_path in image_paths:
        print '\t parse %s' % (image_path)
        reader.setId(image_path)
        x_dims.append(reader.getSizeX())
        y_dims.append(reader.getSizeY())
        z_dims.append(reader.getSizeZ())
        c_dims.append(reader.getSizeC())
        t_dims.append(reader.getSizeT())
        eff.append(reader.imageCount / z_dims[-1] / t_dims[-1])
        spp.append(reader.getSizeC() / eff[-1])
    # pixel type / series / dimension order are taken from the last file read
    format = FormatTools.getPixelTypeString(reader.getPixelType())
    series = reader.getSeries()
    big_endian = Boolean.FALSE
    order = reader.getDimensionOrder()
    reader.close()

    # Compute the dimensions of the output file
    x_dim = max(x_dims)
    y_dim = max(y_dims)
    z_dim = max(z_dims)
    c_dim = max(c_dims)
    t_dim = max(t_dims)
    print '\t series: %i' % series
    print '\t format: %s' % format
    print '\t dimension order: %s' % order
    print '\t x: %s -> %i' % (x_dims, x_dim)
    print '\t y: %s -> %i' % (y_dims, y_dim)
    print '\t z: %s -> %i' % (z_dims, z_dim)
    print '\t c: %s -> %i' % (c_dims, c_dim)
    print '\t t: %s -> %i' % (t_dims, t_dim)
    print '\t effective size c: %s' % eff
    print '\t samples per pixel: %s' % spp

    # Get the time dimension from the number of input files
    t_dim = len(image_paths)

    # TODO: Tried to work out the order with Axes class, got something weird though.
    dimensions = [Short(x_dim), Short(y_dim), Short(c_dim), Short(z_dim)]
    pixels_per_plane = x_dim * y_dim

    # Assemble the metadata for the output file
    out_meta = MetadataTools.createOMEXMLMetadata()
    out_meta.setImageID(MetadataTools.createLSID('Image', series), series)
    out_meta.setPixelsID(MetadataTools.createLSID('Pixels', series), series)
    out_meta.setPixelsBinDataBigEndian(Boolean.TRUE, 0, 0)
    out_meta.setPixelsDimensionOrder(DimensionOrder.fromString(order), series)
    out_meta.setPixelsType(PixelType.fromString(format), series)
    out_meta.setPixelsSizeX(PositiveInteger(x_dim), series)
    out_meta.setPixelsSizeY(PositiveInteger(y_dim), series)
    out_meta.setPixelsSizeZ(PositiveInteger(z_dim), series)
    out_meta.setPixelsSizeC(PositiveInteger(c_dim), series)
    out_meta.setPixelsSizeT(PositiveInteger(t_dim), series)
    for c in range(c_dim):
        out_meta.setChannelID(MetadataTools.createLSID('Channel', series, c), series, c)
        out_meta.setChannelSamplesPerPixel(PositiveInteger(1), series, c)

    # Initialize the BF writer
    result_path = os.path.join(result_dir.getPath(), result_name)
    writer = ImageWriter()
    writer.setMetadataRetrieve(out_meta)
    writer.setId(result_path)
    print '\tcreated to %s' % (result_path)

    # Write the stacks into the output file
    N = len(image_paths)
    for i, image_path in enumerate(image_paths):
        status.showStatus(i, N, "catenating %i of %i time-points" % (i, N))
        print '\t processing %s' % (image_path)
        ds = io.open(image_path)
        # look up the index and extent of each axis of this dataset
        xi = ds.dimensionIndex(Axes.X)
        xv = ds.dimension(xi)
        yi = ds.dimensionIndex(Axes.Y)
        yv = ds.dimension(yi)
        zi = ds.dimensionIndex(Axes.Z)
        zv = ds.dimension(zi)
        ti = ds.dimensionIndex(Axes.TIME)
        tv = ds.dimension(ti)
        ci = ds.dimensionIndex(Axes.CHANNEL)
        cv = ds.dimension(ci)
        # translation needed to centre this stack in the output volume
        dx = float(x_dim - xv) / 2.0
        dy = float(y_dim - yv) / 2.0
        dz = float(z_dim - zv) / 2.0
        print '\t translation vector (dx, dy, dz) = (%f, %f, %f)' % (
            dx, dy, dz)
        if (dx != 0) or (dy != 0) or (dz != 0):
            # shift, then pad with zeros outside the original bounds
            # (the third argument 0 leaves the channel axis untranslated)
            stk = Views.translate(ds, long(dx), long(dy), long(0), long(dz))
            stk = Views.extendZero(stk)
        else:
            stk = Views.extendZero(ds.getImgPlus().getImg())
        print '\t writing planes ',
        n = 0
        plane = 1
        byte_array = []
        interval_view = Views.interval(stk, \
            [Long(0), Long(0), Long(0), Long(0)], \
            [Long(x_dim - 1), Long(y_dim - 1), Long(c_dim - 1), Long(z_dim - 1)])
        cursor = interval_view.cursor()
        # stream pixels plane by plane; flush to the writer every time a
        # full plane's worth of bytes has been accumulated
        while cursor.hasNext():
            n += 1
            cursor.fwd()
            value = cursor.get().getInteger()
            bytes = DataTools.shortToBytes(value, big_endian)
            byte_array.extend(bytes)
            if n == pixels_per_plane:
                writer.saveBytes(plane - 1, byte_array)
                print '.',
                if ((plane) % 10) == 0:
                    print '\n\t ',
                byte_array = []
                plane += 1
                n = 0
        print ' '
    writer.close()
    t = datetime.now() - t_start
    print '\twrote %i planes to %s in %i sec.' % (plane - 1, result_path,
                                                  t.total_seconds())
    print '... done.'
def process(filename):
    """Generate a human-readable methods blurb from an image's OME metadata.

    Extracts microscope manufacturer/modality, objective magnification and
    NA, pixel size, z-step, and per-channel wavelength/exposure information
    via Bio-Formats, and fills sentence templates with the values.

    Arguments:
        filename -- a java.io.File-like object (getAbsolutePath() is used)

    Returns:
        BLURB {string} -- the assembled description

    NOTE(review): relies on script-parameter globals provided by the
    scripting environment (logger, context, omeservice).
    """
    TEMPLATE_GENERAL = "The data was acquired on a {ID} microscope, using a {objective} {NA} NA objective. The pixel size was {pxx_microns} microns. "
    TEMPLATE_CHANNEL = "The excitation and emission wavelengths for channel {ch} were {ex} and {em} and the {exposureTime} was {et}. "
    TEMPLATE_3D = "A series of slices was collected with a step size of {pzz_microns} microns. "
    # TODO: TEMPLATE_TIME is not used yet (time-series support pending).
    TEMPLATE_TIME = "Images were acquired with a time interval of {timeInterval}. "
    BLURB = ""

    # Admin stuff
    import sys
    from org.scijava.ui.swing.console import LoggingPanel
    logger.addLogListener(LoggingPanel(context))
    logger.info(filename.getAbsolutePath())

    # Get a BioFormats reader
    from loci.formats import ImageReader
    ir = ImageReader()
    # Adapted from https://github.com/ome/bioformats/blob/develop/components/formats-gpl/utils/GetPhysicalMetadata.java
    m = omeservice.createOMEXMLMetadata()
    ir.setMetadataStore(m)
    ir.setId(filename.getAbsolutePath())

    # Some checks
    ninstruments = m.getInstrumentCount()
    if ninstruments > 1:
        logger.error("More than one instrument found. Automatic generation will not work...")
    if ninstruments == 0:
        logger.error("No instrument metadata found! Automatic generation will not work...")

    # BUG FIX: ff (the reader's format name) was previously assigned only in
    # the "ID == None" fallback below, so the later "Nikon"/"CZI" checks
    # raised a NameError whenever the manufacturer WAS present in the
    # metadata. Compute it unconditionally.
    ff = str(ir.getFormat())

    # Manufacturer and modalities
    try:
        ID = m.getMicroscopeManufacturer(0)
    except:
        logger.error(sys.exc_info()[0])
        ID = None
    if ID == None:
        # Fall back to guessing the manufacturer from the file format name.
        if "Zeiss" in ff:
            ID = "Zeiss"
        elif "Nikon" in ff:
            ID = "Nikon"
            tID = ir.getMetadataValue("m_sMicroscopePhysFullName")
            if tID is not None:
                ID = tID
        elif "Olympus" in ff:
            ID = "Olympus"
        else:
            ID = ""

    # Acquisition modality, warning when channels disagree.
    for ic in range(ir.getSizeC()):
        mode = m.getChannelAcquisitionMode(0, ic)
        if ic > 0 and mode != mode0:
            logger.warn("WARNING : Not all channels were acquired with the same modality..")
        else:
            mode0 = mode
    if mode == None:
        mode_with_spaces = "UNKNOWN"
    else:
        # Split CamelCase modality names into lowercase words ("TIRF" is
        # kept verbatim).
        mode_with_spaces = ""
        if str(mode) == "TIRF":
            mode_with_spaces = str(mode)
        else:
            for letter in str(mode):
                if letter.isupper():
                    mode_with_spaces += " " + letter.lower()
                else:
                    mode_with_spaces += letter
    ID += " " + str(mode_with_spaces.strip())

    if ninstruments == 1:
        nobjectives = m.getObjectiveCount(0)
        if nobjectives > 1:
            logger.error("More than one objective found. Automatic generation will generate information for the first objective only.")

    # Objective magnification
    objective = "UNKNOWN"
    if ninstruments == 1 and nobjectives > 0:
        try:
            magnification1 = m.getObjectiveNominalMagnification(0, 0)
            if magnification1 != None:
                objective = "{:.0f}x".format(magnification1)
        except:
            logger.error(sys.exc_info()[0])
            msg = "Could not extract information about the objective! The image might be missing some crucial metadata."
            logger.error(msg)
    if objective == "UNKNOWN":
        # Nikon files sometimes carry the objective in a vendor key.
        if "Nikon" in ff:
            objective0 = str(ir.getMetadataValue("sObjective"))
            if objective0 is not None:
                objective = objective0

    # Numerical aperture
    NA = "UNKNOWN"
    if ninstruments == 1 and nobjectives > 0:
        try:
            NA1 = m.getObjectiveLensNA(0, 0)
            if NA1 != None:
                NA = str(NA1)
        except:
            msg = "Could not extract information about the objective! The image might be missing some crucial metadata."
            logger.error(msg)
    NAm = ir.getMetadataValue("Numerical Aperture")
    if NA == "UNKNOWN" and "Nikon" in ff and NAm is not None:
        NA = str(NAm)

    # Pixel size
    nimages = m.getImageCount()
    logger.info("Found {} images".format(nimages))
    from ome.units import UNITS
    pxx_microns = "UNKNOWN"
    if ninstruments == 1 and nobjectives > 0:
        try:
            pxx_microns = "{:.2f}".format(m.getPixelsPhysicalSizeX(0).value(UNITS.MICROMETER))
        except:
            logger.error(sys.exc_info()[0])
            msg = "Could not extract physical pixel size! The image might be missing some crucial metadata."
            logger.error(msg)

    # Is it 3D?
    is3D = ir.getSizeZ() > 1
    pzz_microns = "UNKNOWN"
    if ninstruments == 1 and nobjectives > 0:
        try:
            pzz_microns = "{:.2f}".format(m.getPixelsPhysicalSizeZ(0).value(UNITS.MICROMETER))
        except:
            logger.error(sys.exc_info()[0])
            msg = "This image is 3D but I could not extract physical step size! The image might be missing some crucial metadata."
            logger.error(msg)

    # TODO Is it a time series?

    # GENERAL BLURB GENERATION
    BLURB += TEMPLATE_GENERAL.format(ID=ID, objective=objective, NA=NA,
                                     pxx_microns=pxx_microns)
    if is3D:
        BLURB += TEMPLATE_3D.format(pzz_microns=pzz_microns)

    # Extract channel information
    for ic in range(ir.getSizeC()):
        try:
            ex0 = m.getChannelExcitationWavelength(0, ic)
            if ex0 == None:
                ex = "UNKNOWN"
            else:
                ex = "{:.0f} nm".format(ex0.value(UNITS.NANOMETER))
        except:
            logger.error(sys.exc_info()[0])
            logger.error("Wasn't able to extract channel wavelength information for channel {}.".format(ic+1))
            continue
        try:
            em0 = m.getChannelEmissionWavelength(0, ic)
            if em0 == None:
                em = "UNKNOWN"
            else:
                em = "{:.0f} nm".format(em0.value(UNITS.NANOMETER))
        except:
            logger.error(sys.exc_info()[0])
            logger.error("Wasn't able to extract channel wavelength information for channel {}.".format(ic+1))
            continue
        ix = ir.getIndex(0, ic, 0)  # NOTE : First z plane, first timepoint only
        et = m.getPlaneExposureTime(0, ix)
        if et == None:
            et = "UNKNOWN"
        else:
            etms = et.value(UNITS.MILLISECOND)
            if "CZI" in ff:
                # TODO Check if error is across other images
                logger.warn("The exposure time was divided by 1000 to account for ms mistaken as s in CZI files")
                etms = etms/1000
            if etms < 1000:
                et = str("{:.2f} ms".format(etms))
            else:
                et = str("{} s".format(etms/1000))
            if etms/1000 > 600:
                logger.warn("Exposure time for channel {} is {}s. That's longer than 10m, please double check metadata to make sure it's correct".format(ic+1, etms/1000))
        BLURB += TEMPLATE_CHANNEL.format(ch=ic+1, ex=ex,
                                         exposureTime="exposure time",
                                         et=et, em=em)
    return BLURB
def nucleus_detection(infile, nucleus_channel, stacksize, animation):
    """Detect nuclei with TrackMate's 3D LoG detector and write the spots
    (position, quality, SNR, intensity) to a csv file.

    The stack is processed `stacksize` z-slices per round because running
    the detector on the whole stack at once crashes with too many spots;
    z-positions are shifted back into the full-stack coordinate system
    before writing.

    Arguments:
        infile {string} -- full path of the input (.ids) image
        nucleus_channel {int} -- 1-based channel containing the nuclear stain
        stacksize {int} -- number of z-slices processed per round
        animation {bool} -- if True, render the detections and scroll
            through the slices for visual inspection

    NOTE(review): relies on module-level names defined elsewhere in this
    file (filename(), folder5, and the ImageJ/TrackMate imports).
    """
    # Detect nucleus with 3d log filters
    fullpath = infile
    infile = filename(infile)  # keep only the file name (used as window title)
    IJ.log("Start Segmentation " + str(infile))
    # First get Nb Stacks
    reader = ImageReader()
    omeMeta = MetadataTools.createOMEXMLMetadata()
    reader.setMetadataStore(omeMeta)
    reader.setId(fullpath)
    # Importer options: open only the nucleus channel, z-range added per round.
    default_options = "stack_order=XYCZT color_mode=Composite view=Hyperstack specify_range c_begin=" + \
        str(nucleus_channel) + " c_end=" + str(nucleus_channel) + \
        " c_step=1 open=[" + fullpath + "]"
    NbStack = reader.getSizeZ()
    reader.close()
    # csv output named after the input image, with a header row
    output = re.sub('.ids', '.csv', infile)
    with open(os.path.join(folder5, output), 'wb') as outfile:
        DETECTwriter = csv.writer(outfile, delimiter=',')
        DETECTwriter.writerow(
            ['spotID', 'roundID', 'X', 'Y', 'Z', 'QUALITY', 'SNR', 'INTENSITY'])
    rounds = NbStack // stacksize
    spotID = 1
    for roundid in xrange(1, rounds + 2):
        # Process stacksize by stacksize otherwise crash because too many spots
        Zstart = (stacksize * roundid - stacksize + 1)
        Zend = (stacksize * roundid)
        if(Zend > NbStack):
            # last (partial) round: clamp Zend to the real stack size
            Zend = NbStack % stacksize + (roundid - 1) * stacksize
        IJ.log("Round:" + str(roundid) + ' Zstart=' + str(Zstart) +
               ' Zend=' + str(Zend) + ' out of ' + str(NbStack))
        IJ.run("Bio-Formats Importer", default_options + " z_begin=" +
               str(Zstart) + " z_end=" + str(Zend) + " z_step=1")
        imp = IJ.getImage()
        imp.show()
        cal = imp.getCalibration()
        model = Model()
        settings = Settings()
        settings.setFrom(imp)
        # Configure detector - Manually determined as best
        settings.detectorFactory = LogDetectorFactory()
        settings.detectorSettings = {
            'DO_SUBPIXEL_LOCALIZATION': True,
            'RADIUS': 5.5,
            'TARGET_CHANNEL': 1,
            'THRESHOLD': 50.0,
            'DO_MEDIAN_FILTERING': False,
        }
        filter1 = FeatureFilter('QUALITY', 1, True)
        settings.addSpotFilter(filter1)
        settings.addSpotAnalyzerFactory(SpotIntensityAnalyzerFactory())
        settings.addSpotAnalyzerFactory(SpotContrastAndSNRAnalyzerFactory())
        settings.trackerFactory = SparseLAPTrackerFactory()
        settings.trackerSettings = LAPUtils.getDefaultLAPSettingsMap()
        trackmate = TrackMate(model, settings)
        ok = trackmate.checkInput()
        if not ok:
            sys.exit(str(trackmate.getErrorMessage()))
        try:
            ok = trackmate.process()
        except:
            # process() raises when nothing is detected: close the image
            # window and move on to the next round
            IJ.log("Nothing detected, Round:" + str(roundid) + ' Zstart=' +
                   str(Zstart) + ' Zend=' + str(Zend) + ' out of ' + str(NbStack))
            IJ.selectWindow(infile)
            IJ.run('Close')
            continue
        else:
            if animation:
                # For plotting purpose only
                imp.setPosition(1, 1, imp.getNFrames())
                imp.getProcessor().setMinAndMax(0, 4000)
                selectionModel = SelectionModel(model)
                displayer = HyperStackDisplayer(model, selectionModel, imp)
                displayer.render()
                displayer.refresh()
                for i in xrange(1, imp.getNSlices() + 1):
                    imp.setSlice(i)
                    time.sleep(0.05)
            IJ.selectWindow(infile)
            IJ.run('Close')
            spots = model.getSpots()
            spotIt = spots.iterator(0, False)
            sid = []
            sroundid = []
            x = []
            y = []
            z = []
            q = []
            snr = []
            intensity = []
            for spot in spotIt:
                sid.append(spotID)
                spotID = spotID + 1
                sroundid.append(roundid)
                x.append(spot.getFeature('POSITION_X'))
                y.append(spot.getFeature('POSITION_Y'))
                q.append(spot.getFeature('QUALITY'))
                snr.append(spot.getFeature('SNR'))
                intensity.append(spot.getFeature('MEAN_INTENSITY'))
                # Correct Z position: shift by the calibrated depth of the
                # slices already processed in earlier rounds
                correct_z = spot.getFeature(
                    'POSITION_Z') + (roundid - 1) * float(stacksize) * cal.pixelDepth
                z.append(correct_z)
            # append this round's spots to the csv
            with open(os.path.join(folder5, output), 'ab') as outfile:
                DETECTwriter = csv.writer(outfile, delimiter=',')
                Sdata = zip(sid, sroundid, x, y, z, q, snr, intensity)
                for Srow in Sdata:
                    DETECTwriter.writerow(Srow)