예제 #1
0
def getFrameIntervalFromImage(image_path):
    """Return the frame interval (OME Pixels TimeIncrement) of an image.

    image_path -- path of any Bio-Formats-readable image file.
    Returns the numeric time-increment value of the first image series.
    Logs the detected value via the module-level log() helper.
    """
    from loci.formats import ImageReader
    from loci.formats import MetadataTools
    r = ImageReader()
    meta = MetadataTools.createOMEXMLMetadata()
    r.setMetadataStore(meta)
    r.setId(image_path)
    frame_interval = meta.getPixelsTimeIncrement(0).value()
    r.close()  # BUGFIX: the reader (and its file handle) was never closed
    log("Detected frame rate: %s (%s)" % (frame_interval, image_path))
    return frame_interval
def Z1_metadata(sourcefile):
    """Return the number of series (views) in a Zeiss Z1 lightsheet file.

    Only the header/OME metadata is parsed; pixel data is not loaded.
    Relies on the module-level ImageReader / MetadataTools imports.
    """
    # Access header of Z1 lightsheet data to determine the number of views.
    # (Indentation normalized: the original mixed a tab-indented comment
    # into a space-indented body.)
    reader = ImageReader()
    omeMeta = MetadataTools.createOMEXMLMetadata()
    reader.setMetadataStore(omeMeta)
    reader.setId(sourcefile)
    seriesCount = reader.getSeriesCount()
    reader.close()
    return seriesCount
def DirList(baseDir):
	r = ImageReader()
	imgStats = {}
	for root, dirs, files in os.walk(str(baseDir)):
		for f1 in files:
			if f1.endswith(".jpg") or f1.endswith(".jpe") or f1.endswith(".jpeg"):
				id = root + "/" +  f1
				r.setId(id)
				if r is None:
					print "Couldn\'t open image from file:", id
					continue
				w = r.getSizeX()
				h = r.getSizeY()
				imgStats[str(w) + "_" + str(h)] = imgStats.get(str(w) + "_" + str(h), 0)+1
				IJ.log("Found image: " + str(id))
				#counter += 1
	r.close()
	#print summary
	summary = ''
	for k, v in imgStats.iteritems():
		dim = k.split("_")
		ratio = float(dim[0])/float(dim[1])
		IJ.log("Found " + str(v) + " images of dimension " + str(dim[0]) + "x" + str(dim[1]) + " apect ratio " + str(round(ratio, 2)))
		summary = summary + "\nFound " + str(v) + " images of dimension " + str(dim[0]) + "x" + str(dim[1]) + " apect ratio " + str(round(ratio, 2))
	return summary
예제 #4
0
    if os.path.isdir(os.path.join(root_folder, content))
]

for subfolder in subfolders:
    subfolder_contents = os.listdir(subfolder)
    tiff_files = [
        content for content in subfolder_contents
        if os.path.splitext(content)[1] == '.tif'
    ]
    for tiff_file in tiff_files:
        image_path = os.path.join(subfolder, tiff_file)
        print("input path = " + image_path)
        bfimp = bf.openImagePlus(image_path)
        imp = bfimp[0]

        reader = ImageReader()
        ome_meta = MetadataTools.createOMEXMLMetadata()
        reader.setMetadataStore(ome_meta)
        reader.setId(image_path)
        reader.close()
        if imp.getNFrames() > imp.getNSlices():
            try:
                frame_interval = ome_meta.getPixelsTimeIncrement(0).value()
                frame_unit = ome_meta.getPixelsTimeIncrement(
                    0).unit().getSymbol()
            except:
                frame_interval = acquisition_frame_interval_s
                frame_unit = 's'
        else:
            try:
                z_interval = ome_meta.getPixelsPhysicalSizeZ(0).value()
예제 #5
0
def processMovie(root, files, outfile):
    """Concatenate single-time-point images and write an ome.tiff file.

    Each file in `files` must contain exactly one time point; planes are
    concatenated along T and the combined OME-XML (plane positions,
    delta-T, ...) is rewritten into the output file's TIFF comment.

    Raises RuntimeError for fewer than two files or multi-time-point input.
    Relies on module-level helpers: setUpXml, getTimePoint,
    concatenateImagePlus.
    """
    files.sort()

    # Fail fast: nothing to concatenate with fewer than two files.
    # (The original performed all the metadata work before this check.)
    if len(files) <= 1:
        raise RuntimeError('Found only one file. Nothing to concatenate')

    options = ImporterOptions()
    options.setId(files[0])
    options.setVirtual(1)

    image = BF.openImagePlus(options)
    image = image[0]
    if image.getNFrames() > 1:
        msg = ("%s Contains multiple time points. Can only concatenate"
               " single time points!" %files[0])
        image.close()  # BUGFIX: was placed after the raise (unreachable)
        raise RuntimeError(msg)

    reader = ImageReader()
    reader.setMetadataStore(MetadataTools.createOMEXMLMetadata())
    reader.setId(files[0])
    timeInfo = []
    omeOut = reader.getMetadataStore()
    omeOut = setUpXml(omeOut, image, files)
    reader.close()
    image.close()

    for itime, fileName in enumerate(files):
        omeMeta = MetadataTools.createOMEXMLMetadata()
        reader.setMetadataStore(omeMeta)
        reader.setId(fileName)
        timeInfo.append(getTimePoint(reader, omeMeta))

        # Time offset relative to the first file. Timepoints are either
        # numeric (round to 2 decimals) or datetime-like (use the
        # difference's .seconds). Hoisted out of the plane loop: it is
        # identical for every plane of this file.
        try:
            dT = round(timeInfo[itime] - timeInfo[0], 2)
        except Exception:
            dT = (timeInfo[itime] - timeInfo[0]).seconds

        nrImages = reader.getImageCount()
        for i in range(nrImages):
            plane = i + itime * nrImages
            omeOut.setPlaneDeltaT(dT, 0, plane)
            omeOut.setPlanePositionX(omeOut.getPlanePositionX(0, i), 0, plane)
            omeOut.setPlanePositionY(omeOut.getPlanePositionY(0, i), 0, plane)
            omeOut.setPlanePositionZ(omeOut.getPlanePositionZ(0, i), 0, plane)
            omeOut.setPlaneTheC(omeOut.getPlaneTheC(0, i), 0, plane)
            omeOut.setPlaneTheT(omeOut.getPlaneTheT(0, i), 0, plane)
            omeOut.setPlaneTheZ(omeOut.getPlaneTheZ(0, i), 0, plane)
        reader.close()

        IJ.showProgress(itime, len(files))

    # Average time increment over the whole series; fall back to 0 when
    # the offsets are not numeric (e.g. timedelta-based).
    try:
        omeOut.setPixelsTimeIncrement(float(dT / (len(files) - 1)), 0)
    except Exception:
        omeOut.setPixelsTimeIncrement(0, 0)

    outfile = concatenateImagePlus(files, outfile)
    filein = RandomAccessInputStream(outfile)
    fileout = RandomAccessOutputStream(outfile)
    saver = TiffSaver(fileout, outfile)
    saver.overwriteComment(filein, omeOut.dumpXML())
    fileout.close()
    filein.close()
예제 #6
0
def readczi(imagefile,
            stitchtiles=True,
            setflatres=False,
            readpylevel=0,
            setconcat=True,
            openallseries=True,
            showomexml=False,
            attach=False,
            autoscale=True):
    """Read a CZI file via Bio-Formats / ZeissCZIReader.

    Returns (imp, metainfo): the ImagePlus for the requested pyramid level
    (falling back to level 0 when it does not exist) and a dict with the
    collected dimensions / scaling metadata.
    """
    log.info('Filename : ' + imagefile)

    metainfo = {}
    # check the file extension
    metainfo['Extension'] = MiscTools.getextension(MiscTools.splitext_recurse(imagefile))
    log.info('Detected File Extension : ' + metainfo['Extension'])

    # initialize the reader and get the OME metadata
    reader = ImageReader()
    omeMeta = MetadataTools.createOMEXMLMetadata()
    reader.setMetadataStore(omeMeta)
    reader.setId(imagefile)
    metainfo['SeriesCount_BF'] = reader.getSeriesCount()
    reader.close()

    # get the scaling for XYZ (None when no physical size is stored)
    physSizeX = omeMeta.getPixelsPhysicalSizeX(0)
    physSizeY = omeMeta.getPixelsPhysicalSizeY(0)
    physSizeZ = omeMeta.getPixelsPhysicalSizeZ(0)

    if physSizeX is not None:
        metainfo['ScaleX'] = round(physSizeX.value(), 3)
    else:
        metainfo['ScaleX'] = None

    # BUGFIX: ScaleY was previously copied from physSizeX
    if physSizeY is not None:
        metainfo['ScaleY'] = round(physSizeY.value(), 3)
    else:
        metainfo['ScaleY'] = None

    if physSizeZ is not None:
        metainfo['ScaleZ'] = round(physSizeZ.value(), 3)
    else:
        metainfo['ScaleZ'] = None

    options = DynamicMetadataOptions()
    options.setBoolean("zeissczi.autostitch", stitchtiles)
    options.setBoolean("zeissczi.attachments", attach)

    czireader = ZeissCZIReader()
    czireader.setFlattenedResolutions(setflatres)
    czireader.setMetadataOptions(options)
    czireader.setId(imagefile)

    # Set the preferences in the ImageJ plugin.
    # Note: although these preferences are applied, they are not refreshed in the UI.
    Prefs.set("bioformats.zeissczi.allow.autostitch", str(stitchtiles).lower())
    Prefs.set("bioformats.zeissczi.include.attachments", str(attach).lower())

    metainfo['rescount'] = czireader.getResolutionCount()
    metainfo['SeriesCount_CZI'] = czireader.getSeriesCount()

    # Dimensions
    metainfo['SizeT'] = czireader.getSizeT()
    metainfo['SizeZ'] = czireader.getSizeZ()
    metainfo['SizeC'] = czireader.getSizeC()
    metainfo['SizeX'] = czireader.getSizeX()
    metainfo['SizeY'] = czireader.getSizeY()

    # check for autostitching and possibility to read attachments
    metainfo['AllowAutoStitching'] = czireader.allowAutostitching()
    metainfo['CanReadAttachments'] = czireader.canReadAttachments()

    # read in and display ImagePlus(es) with arguments
    options = ImporterOptions()
    options.setOpenAllSeries(openallseries)
    options.setShowOMEXML(showomexml)
    options.setConcatenate(setconcat)
    options.setAutoscale(autoscale)
    options.setId(imagefile)

    # open the ImgPlus
    imps = BF.openImagePlus(options)
    metainfo['Pyramid Level Output'] = readpylevel + 1

    try:
        imp = imps[readpylevel]
        pylevelout = metainfo['SeriesCount_CZI']
    except IndexError:
        # fallback option when the requested pyramid level does not exist
        log.info('PyLevel=' + str(readpylevel) + ' does not exist.')
        log.info('Using Pyramid Level = 0 as fallback.')
        imp = imps[0]
        pylevelout = 0
        metainfo['Pyramid Level Output'] = pylevelout

    # get the stack and some info
    imgstack = imp.getImageStack()
    metainfo['Output Slices'] = imgstack.getSize()
    metainfo['Output SizeX'] = imgstack.getWidth()
    metainfo['Output SizeY'] = imgstack.getHeight()

    # calc scaling in case of pyramid
    scale = float(metainfo['SizeX']) / float(metainfo['Output SizeX'])
    metainfo['Pyramid Scale Factor'] = scale
    # BUGFIX: guard against missing physical sizes (the original crashed
    # multiplying None * scale when no scaling metadata was present)
    if metainfo['ScaleX'] is not None:
        metainfo['ScaleX Output'] = metainfo['ScaleX'] * scale
    else:
        metainfo['ScaleX Output'] = None
    if metainfo['ScaleY'] is not None:
        metainfo['ScaleY Output'] = metainfo['ScaleY'] * scale
    else:
        metainfo['ScaleY Output'] = None

    # set the correct scaling
    # BUGFIX: scaleY previously reused 'ScaleX Output'
    imp = MiscTools.setscale(imp, scaleX=metainfo['ScaleX Output'],
                             scaleY=metainfo['ScaleY Output'],
                             scaleZ=metainfo['ScaleZ'],
                             unit="micron")

    # close czireader
    czireader.close()

    return imp, metainfo
예제 #7
0
    def openfile(imagefile,
                 stitchtiles=True,
                 setflatres=False,
                 readpylevel=0,
                 setconcat=True,
                 openallseries=True,
                 showomexml=False,
                 attach=False,
                 autoscale=True,
                 imageID=0):
        """Open an image file with the reader matching its extension.

        CZI files go through ImportTools.readCZI, JPEGs through
        ImageTools.openjpg, everything else through ImportTools.readbf.
        Returns (imp, metainfo) where metainfo holds dimensions and
        XYZ scaling parsed from the OME metadata.
        """
        # stitchtiles = option of CZIReader to return the raw tiles as
        # individual series rather than the auto-stitched images

        metainfo = {}
        # checking for the file extension
        metainfo['Extension'] = MiscTools.getextension(MiscTools.splitext_recurse(imagefile))

        # initialize the reader and get the OME metadata
        reader = ImageReader()
        omeMeta = MetadataTools.createOMEXMLMetadata()
        reader.setMetadataStore(omeMeta)
        reader.setId(imagefile)
        # BUGFIX: the image count was previously queried before setId(),
        # when the metadata store was still empty
        metainfo['ImageCount_OME'] = omeMeta.getImageCount()
        metainfo['SeriesCount_BF'] = reader.getSeriesCount()
        reader.close()

        # read dimensions TZCXY from OME metadata
        metainfo['SizeT'] = omeMeta.getPixelsSizeT(imageID).getValue()
        metainfo['SizeZ'] = omeMeta.getPixelsSizeZ(imageID).getValue()
        metainfo['SizeC'] = omeMeta.getPixelsSizeC(imageID).getValue()
        metainfo['SizeX'] = omeMeta.getPixelsSizeX(imageID).getValue()
        metainfo['SizeY'] = omeMeta.getPixelsSizeY(imageID).getValue()

        # store info about stack
        if metainfo['SizeZ'] == 1:
            metainfo['is3d'] = False
        elif metainfo['SizeZ'] > 1:
            metainfo['is3d'] = True

        # get the scaling for XYZ (None when not present in the metadata)
        physSizeX = omeMeta.getPixelsPhysicalSizeX(0)
        physSizeY = omeMeta.getPixelsPhysicalSizeY(0)
        physSizeZ = omeMeta.getPixelsPhysicalSizeZ(0)

        if physSizeX is not None:
            metainfo['ScaleX'] = round(physSizeX.value(), 3)
        else:
            metainfo['ScaleX'] = None

        # BUGFIX: ScaleY was previously copied from physSizeX
        if physSizeY is not None:
            metainfo['ScaleY'] = round(physSizeY.value(), 3)
        else:
            metainfo['ScaleY'] = None

        if physSizeZ is not None:
            metainfo['ScaleZ'] = round(physSizeZ.value(), 3)
        else:
            metainfo['ScaleZ'] = None

        # if image file is Carl Zeiss Image - CZI
        if metainfo['Extension'] == '.czi':

            # read the CZI file using the CZIReader
            # pylevel = 0 - read the full resolution image
            imp, metainfo = ImportTools.readCZI(imagefile, metainfo,
                                                stitchtiles=stitchtiles,
                                                setflatres=setflatres,
                                                readpylevel=readpylevel,
                                                setconcat=setconcat,
                                                openallseries=openallseries,
                                                showomexml=showomexml,
                                                attach=attach,
                                                autoscale=autoscale)

        # if image file is not Carl Zeiss Image - CZI
        if metainfo['Extension'] != '.czi':

            # read the imagefile using the correct method
            # BUGFIX: `ext == ('.jpg' or '.jpeg')` only ever compared '.jpg'
            if metainfo['Extension'].lower() in ('.jpg', '.jpeg'):
                # use dedicated method for jpg
                imp, metainfo = ImageTools.openjpg(imagefile, method='IJ')
            else:
                # if not jpg - use BioFormats
                imp, metainfo = ImportTools.readbf(imagefile, metainfo,
                                                   setflatres=setflatres,
                                                   readpylevel=readpylevel,
                                                   setconcat=setconcat,
                                                   openallseries=openallseries,
                                                   showomexml=showomexml,
                                                   autoscale=autoscale)

        return imp, metainfo
def processFile():
	"""Correct channel offsets caused by dichroic mirrors in an Olympus .oir file.

	Interactive workflow:
	  1. ask the user for a .oir file and parse its OME / global / series metadata,
	  2. ask which dichroic mirror (DM1..DM5) was used for each channel,
	  3. translate every non-DM1 channel by the mirror-specific offset
	     (rotated by the total FOV+ROI rotation), save one TIFF per channel,
	  4. optionally merge the corrected channels into a single composite TIFF.

	Relies on module-level helpers/globals: pluginVersion, dichroicDict,
	extractChannel, getOffset, rotateOffset, readSingleChannelImg.
	(Indentation normalized to tabs; the original mixed tabs and spaces.)
	"""
	# start logging
	IJ.log("\n______________________________\n\n\t\tOlympus DM correction\n\t\tVersion " + pluginVersion +"\n______________________________\n")

	# ask user for file
	ofd = OpenDialog("Choose a file", None)
	filename = ofd.getFileName()

	if filename is None:
		IJ.log("User canceled the dialog!\nImage processing canceled!\n")
		return

	directory = ofd.getDirectory()
	filepath = directory + filename
	IJ.log("File path: " + filepath)

	if not filename.endswith(".oir"):
		IJ.log("Not an Olympus (.oir) file.\nNo image to process.\n")
		return

	filenameExExt = os.path.splitext(filename)[0]

	# parse metadata
	reader = ImageReader()
	omeMeta = MetadataTools.createOMEXMLMetadata()
	reader.setMetadataStore(omeMeta)
	reader.setId(filepath)
	numChannels = reader.getSizeC()
	numSlices = reader.getSizeZ()
	numFrames = reader.getSizeT()
	seriesCount = reader.getSeriesCount()

	globalMetadata = reader.getGlobalMetadata()
	seriesMetadata = reader.getSeriesMetadata()
	reader.close()  # BUGFIX: reader was never closed; metadata is already copied out

	objLensName = globalMetadata['- Objective Lens name #1']

	areaRotation = float(seriesMetadata['area rotation #1'])
	# read but currently unused; kept so a missing metadata key fails early
	acquisitionValueRotation = float(seriesMetadata['acquisitionValue rotation #1'])
	if 'regionInfo rotation #1' in seriesMetadata:
		regionInfoRotation = float(seriesMetadata['regionInfo rotation #1'])
	else:
		regionInfoRotation = float(0)

	totalRotation = areaRotation + regionInfoRotation
	physSizeX = omeMeta.getPixelsPhysicalSizeX(0)
	physSizeY = omeMeta.getPixelsPhysicalSizeY(0)
	pxSizeX = physSizeX.value(UNITS.MICROM)
	pxSizeY = physSizeY.value(UNITS.MICROM)

	# log metadata
	IJ.log("\nMETADATA")
	#IJ.log("Filename: " + filepath)
	IJ.log("Number of series: " + str(seriesCount))
	IJ.log("Number of channels: " + str(numChannels))
	IJ.log("Number of frames: " + str(numFrames))
	IJ.log("Number of slices: " + str(numSlices))
	IJ.log("Objective lens: " + objLensName)
	IJ.log("FOV rotation: " + str(areaRotation))
	IJ.log("ROI rotation: " + str(regionInfoRotation))
	IJ.log("Total rotation: " + str(totalRotation))
	IJ.log("Pixel size:")
	IJ.log("\t\tX = " + str(physSizeX.value()) + " " + physSizeX.unit().getSymbol())
	IJ.log("\t\tY = " + str(physSizeY.value()) + " " + physSizeY.unit().getSymbol())

	# ask user to identify dichroic mirror used for each channel
	gdDM = GenericDialog("Dichroic mirrors")
	DMs = ["DM1", "DM2", "DM3", "DM4", "DM5"]
	for i in range(numChannels):
		gdDM.addChoice("Channel " + str(i+1), DMs, DMs[0])
	gdDM.addCheckbox("Merge channels", False)
	gdDM.showDialog()
	if gdDM.wasCanceled():
		IJ.log("User canceled the dialog!\nImage processing canceled!\n")
		return
	dichroics = []
	for i in range(numChannels):
		dichroics.append(gdDM.getNextChoice())
	merge = gdDM.getNextBoolean()
	IJ.log("\nUser selected dichroic mirrors")
	for i in range(numChannels):
		IJ.log("\t\tChannel " + str(i+1) + ": " + dichroics[i])

	if merge:
		# map colour slots to channel indices via a second dialog
		channels = []
		chDict = {}
		for i in range(numChannels):
			chName = "Channel"+str(i+1)
			channels.append(chName)
			chDict[chName] = i
		channels.append("NONE")
		colourChoices = ["red", "green", "blue", "gray", "cyan", "magenta", "yellow"]
		gdMerge = GenericDialog("Merge channels")
		for c in colourChoices:
			# default every colour slot to "NONE" (last entry in channels)
			gdMerge.addChoice(c + ":", channels, channels[numChannels])
		gdMerge.showDialog()
		if gdMerge.wasCanceled():
			IJ.log("User canceled the dialog!\nImage processing canceled!\n")
			return
		IJ.log("\nUser selected channel colours")
		mergeList = []
		for i in range(len(colourChoices)):
			ch = gdMerge.getNextChoice()
			if ch == "NONE":
				mergeList.append(None)
			else:
				mergeList.append(chDict[ch])
				IJ.log("\t\t" + colourChoices[i] + ": " + ch)

	# ask user for an output directory
	dc = DirectoryChooser("Choose folder for output")
	od = dc.getDirectory()
	if od is None:
		IJ.log("User canceled the dialog!\nImage processing canceled!\n")
		return

	if merge:
		# hidden, timestamped temp folder holding per-channel tiffs until merged
		tifDir = od + "." + str(datetime.now()).replace(" ", "").replace(":", "") + "/"
		if not os.path.exists(tifDir):
			os.makedirs(tifDir)
			IJ.log("\nCreated temporary folder: " + tifDir + "\n")
		else:
			IJ.log("Unable to create temporary folder!\n")
	else:
		tifDir = od + filenameExExt + "/"
		if not os.path.exists(tifDir):
			os.makedirs(tifDir)
			IJ.log("\nCreated subfolder: " + tifDir + "\n")
		else:
			IJ.log("\nSubfolder " + tifDir +  " already exists")

	# correct images
	tifFilePaths = []
	for i in range(numChannels):
		ip = extractChannel(oirFile=filepath, ch=i)
		if dichroics[i] == "DM1":
			IJ.log("Channel " + str(i+1) + " was imaged using DM1, so no correction required.")
		else:
			offsets = getOffset(obj=objLensName,dm=dichroicDict[dichroics[i]])
			xom = offsets['x']
			yom = offsets['y']
			if abs(totalRotation) > 0.1:
				# rotate the offset vector into the acquisition frame
				rotOff = rotateOffset(x=xom, y=yom, angle=-totalRotation)
				xom = rotOff['x']
				yom = rotOff['y']
			xop = int(round(xom/pxSizeX))
			yop = int(round(yom/pxSizeY))
			IJ.log("Channel " + str(i+1) + " offsets")
			IJ.log("\t\tMicrometres")
			IJ.log("\t\t\t\tx = " + str(xom))
			IJ.log("\t\t\t\ty = " + str(yom))
			IJ.log("\t\tPixels")
			IJ.log("\t\t\t\tx = " + str(xop))
			IJ.log("\t\t\t\ty = " + str(yop))
			IJ.run(ip, "Translate...", "x=" + str(-xop) + " y=" + str(-yop) + " interpolation=None stack")

		tifFilePath = tifDir + filenameExExt + "_ch_"+str(i+1)+".tif"
		tifFilePaths.append(tifFilePath)
		if os.path.exists(tifFilePath):
			IJ.log("\nOutput file exists: " + tifFilePath)
			IJ.log("Rerun plugin choosing a different output folder")
			IJ.log("or delete file and then rerun plugin.")
			IJ.log("Image processing terminated!\n")
			return
		FileSaver(ip).saveAsTiff(tifFilePath)

	if merge:
		for i in range(len(mergeList)):
			if mergeList[i] != None:
				mergeList[i] = readSingleChannelImg(tifFilePaths[mergeList[i]])
		merged = RGBStackMerge.mergeChannels(mergeList, False)
		mergedChannelFilepath = od + filenameExExt + ".tif"
		if os.path.exists(mergedChannelFilepath):
			IJ.log("\nOutput file exists: " + mergedChannelFilepath)
			IJ.log("Rerun plugin choosing a different output folder")
			IJ.log("or delete file and then rerun plugin.")
			IJ.log("Image processing terminated!\n")
			return  # BUGFIX: previously fell through and overwrote the existing file
		FileSaver(merged).saveAsTiff(mergedChannelFilepath)
		for tf in tifFilePaths:
			os.remove(tf)
		os.rmdir(tifDir)

	IJ.log("\nFinished processing file:\n" + filepath + "\n")
	if merge:
		IJ.log("Image file with channels aligned:\n" + od + filenameExExt + ".tif\n")
	else:
		IJ.log("Aligned images (one tiff file for each channel) can be found in:\n" + tifDir + "\n")
예제 #9
0
def get_reader(file, complete_meta):
    """Return an ImageReader initialised on *file*.

    The reader's metadata is written into *complete_meta* (attached as
    the metadata store before the file is opened).
    """
    bf_reader = ImageReader()
    bf_reader.setMetadataStore(complete_meta)
    bf_reader.setId(file)
    return bf_reader
def nucleus_detection(infile, nucleus_channel, stacksize, animation):
    """Detect nuclei in a 3D image with TrackMate's LoG spot detector.

    The stack is processed in chunks of `stacksize` z-planes (processing
    everything at once crashes with too many spots); detections from each
    chunk are appended to a CSV file inside the module-level `folder5`
    directory, with z positions corrected back to the full-stack frame.

    infile          -- full path of the input image (expects a .ids file)
    nucleus_channel -- channel number to load for detection
    stacksize       -- number of z-planes processed per round
    animation       -- when True, scroll through the slices for display

    Relies on module-level names: filename(), folder5, and the ImageJ /
    TrackMate classes imported at file level.
    """
    fullpath = infile
    infile = filename(infile)  # keep only the file name (window title / output naming)
    IJ.log("Start Segmentation " + str(infile))
    # First get the number of z-planes from the Bio-Formats metadata
    reader = ImageReader()
    omeMeta = MetadataTools.createOMEXMLMetadata()
    reader.setMetadataStore(omeMeta)
    reader.setId(fullpath)
    # Importer options: open only the nucleus channel, as a hyperstack
    default_options = "stack_order=XYCZT color_mode=Composite view=Hyperstack specify_range c_begin=" + \
        str(nucleus_channel) + " c_end=" + str(nucleus_channel) + \
        " c_step=1 open=[" + fullpath + "]"
    NbStack = reader.getSizeZ()
    reader.close()
    # NOTE(review): '.ids' is an unescaped regex ('.' matches any char);
    # fine for the expected '.ids' suffix but worth confirming.
    output = re.sub('.ids', '.csv', infile)
    # write the CSV header (file is recreated on every call)
    with open(os.path.join(folder5, output), 'wb') as outfile:
        DETECTwriter = csv.writer(outfile, delimiter=',')
        DETECTwriter.writerow(
            ['spotID', 'roundID', 'X', 'Y', 'Z', 'QUALITY', 'SNR', 'INTENSITY'])
    rounds = NbStack // stacksize
    spotID = 1
    for roundid in xrange(1, rounds + 2):
        # Process stacksize by stacksize otherwise crash because too many spots
        Zstart = (stacksize * roundid - stacksize + 1)
        Zend = (stacksize * roundid)
        if(Zend > NbStack):
            # last (partial) chunk: clamp Zend to the real stack depth
            Zend = NbStack % stacksize + (roundid - 1) * stacksize
        IJ.log("Round:" + str(roundid) + ' Zstart=' + str(Zstart) +
               ' Zend=' + str(Zend) + ' out of ' + str(NbStack))
        IJ.run("Bio-Formats Importer", default_options + " z_begin=" +
               str(Zstart) + " z_end=" + str(Zend) + " z_step=1")
        imp = IJ.getImage()
        imp.show()
        cal = imp.getCalibration()  # used below to convert chunk z-index to physical z
        model = Model()
        settings = Settings()
        settings.setFrom(imp)
        # Configure detector - Manually determined as best
        settings.detectorFactory = LogDetectorFactory()
        settings.detectorSettings = {
            'DO_SUBPIXEL_LOCALIZATION': True,
            'RADIUS': 5.5,
            'TARGET_CHANNEL': 1,
            'THRESHOLD': 50.0,
            'DO_MEDIAN_FILTERING': False,
        }
        # keep only spots with QUALITY above 1
        filter1 = FeatureFilter('QUALITY', 1, True)
        settings.addSpotFilter(filter1)
        settings.addSpotAnalyzerFactory(SpotIntensityAnalyzerFactory())
        settings.addSpotAnalyzerFactory(SpotContrastAndSNRAnalyzerFactory())
        settings.trackerFactory = SparseLAPTrackerFactory()
        settings.trackerSettings = LAPUtils.getDefaultLAPSettingsMap()

        trackmate = TrackMate(model, settings)
        ok = trackmate.checkInput()
        if not ok:
            sys.exit(str(trackmate.getErrorMessage()))
        try:
            ok = trackmate.process()
        except:
            # presumably raised when no spot at all is found in this chunk;
            # log it, close the image window and continue with the next chunk
            IJ.log("Nothing detected, Round:" + str(roundid) + ' Zstart=' +
                   str(Zstart) + ' Zend=' + str(Zend) + ' out of ' + str(NbStack))
            IJ.selectWindow(infile)
            IJ.run('Close')
            continue
        else:
            if animation:
                # For plotting purpose only
                imp.setPosition(1, 1, imp.getNFrames())
                imp.getProcessor().setMinAndMax(0, 4000)
                selectionModel = SelectionModel(model)
                displayer = HyperStackDisplayer(model, selectionModel, imp)
                displayer.render()
                displayer.refresh()
                for i in xrange(1, imp.getNSlices() + 1):
                    imp.setSlice(i)
                    time.sleep(0.05)
            IJ.selectWindow(infile)
            IJ.run('Close')
            spots = model.getSpots()
            spotIt = spots.iterator(0, False)
            # collect per-spot features into parallel lists for the CSV rows
            sid = []
            sroundid = []
            x = []
            y = []
            z = []
            q = []
            snr = []
            intensity = []
            for spot in spotIt:
                sid.append(spotID)
                spotID = spotID + 1
                sroundid.append(roundid)
                x.append(spot.getFeature('POSITION_X'))
                y.append(spot.getFeature('POSITION_Y'))
                q.append(spot.getFeature('QUALITY'))
                snr.append(spot.getFeature('SNR'))
                intensity.append(spot.getFeature('MEAN_INTENSITY'))
                # Correct Z position: shift by the z-offset of this chunk
                correct_z = spot.getFeature(
                    'POSITION_Z') + (roundid - 1) * float(stacksize) * cal.pixelDepth
                z.append(correct_z)
            # append this round's detections to the CSV
            with open(os.path.join(folder5, output), 'ab') as outfile:
                DETECTwriter = csv.writer(outfile, delimiter=',')
                Sdata = zip(sid, sroundid, x, y, z, q, snr, intensity)
                for Srow in Sdata:
                    DETECTwriter.writerow(Srow)
from loci.formats.out import OMETiffWriter
from loci.common.image import IImageScaler
from loci.common.image import SimpleImageScaler
from ome.xml.model.primitives import PositiveInteger
from ome.units import UNITS
from java.lang import Math
from ij import IJ

# settings
# NOTE(review): 'file' (and 'type' below) shadow Python 2 builtins; kept as-is.
file = "/path/to/inputFile.tiff"
outFile = "/path/to/outputFile.ome.tiff"
resolutions = 4
scale = 2

# setup reader and parse metadata
reader = ImageReader()
omeMeta = MetadataTools.createOMEXMLMetadata()
reader.setMetadataStore(omeMeta)
reader.setId(file)

# declare sub-resolutions: level i+1 is the full size divided by scale^(i+1)
for i in range(resolutions):
    divScale = Math.pow(scale, i + 1)
    omeMeta.setResolutionSizeX(PositiveInteger(int(reader.getSizeX() / divScale)), 0, i + 1)
    omeMeta.setResolutionSizeY(PositiveInteger(int(reader.getSizeY() / divScale)), 0, i + 1)

# setup writer
writer = OMETiffWriter()
writer.setMetadataRetrieve(omeMeta)
writer.setId(outFile)
type = reader.getPixelType()  # source pixel type, presumably for a copy loop that follows (truncated here)
	## Collects info about the image stack
	basic_info = parse_tile_info_file(parentLSMFilePath+"_tiles/tile_info.txt")
	make_destination_directories(parentLSMFilePath+"_tiles/v_img/")
	tileConfigFilePath = parentLSMFilePath + "_tiles/resized/TileConfiguration.registered.txt"
	scale_info = estimate_scale_multiplier(parentLSMFilePath+"_tiles/tile_1.ome.tif",parentLSMFilePath+"_tiles/resized/tile_1.tif")
	print scale_info
	coords_list = read_tileconfig_file(tileConfigFilePath)
	coords_normed = normalize_coords_in_list(coords_list)
	coords_upscaled = round_coords(upscale_coords(coords_normed,scale_info[0]))
	write_tileconfig_file(parentLSMFilePath+"_tiles/v_img/TileConfiguration.fullsize.txt",coords_upscaled,".ome.tif")
	max_coords = get_max_coordinates(coords_upscaled)
	print max_coords
	print basic_info

	## Outputs each stitched z plane as a separate file
	iReader = ImageReader()
	iReader.setId(parentLSMFilePath)
	for z in range(max_coords[2]+basic_info[4]):
	## for z in range(50,51):
		IJ.showStatus("z: "+str(z+1)+" of "+str(max_coords[2]+basic_info[4]))
		chIps = []
		resImages = []
		for ch in range(basic_info[0]):
			chIps.append(ByteProcessor(max_coords[0]+scale_info[2],max_coords[1]+scale_info[2]))
		for ch in range(basic_info[0]):
			resImages.append(ImagePlus("ch"+str(ch+1),chIps[ch]))
		for se in range(basic_info[1]):
			IJ.showProgress(se,basic_info[1])
			if z >= coords_upscaled[se][2] and z <= coords_upscaled[se][2]+basic_info[4]-1:
				iReader.setSeries(se)
				for ch in range(basic_info[0]):
		fs.saveAsTiff(new_name)

# Main script
# Convert every accepted image in `folder` to TIFF, one file per series.

new_dir = make_dir(folder)
# keep only files with an accepted extension...
candidates = [str(f) for f in os.listdir(folder) if f.endswith(accepted_files)]
# ...and drop automatically generated hidden files (leading dot)
real_names = [name for name in candidates if not name.startswith(".")]
IJ.log("\n\n >> Now converting the following files: \n" + "\n --> ".join(real_names))

for name in real_names:
	image = os.path.join(folder, name)
	print(image)
	IJ.log("\n >> Now processing: \n --> " + str(image))
	reader = ImageReader()
	reader.setId(image)
	series_total = reader.getSeriesCount()
	# convert each series of the current file
	for s in range(series_total):
		imps = set_options(image, s)
		fixed_name = fix_name(imps)
		save_tif(imps, fixed_name, new_dir)

print("DONE!")
IJ.log("\n\n >> DONE!")
예제 #14
0
from ij.gui import Roi, PolygonRoi 

from loci.plugins import BF
from loci.common import Region
from loci.plugins.in import ImporterOptions
from loci.formats import ImageReader, ImageWriter
from loci.formats import MetadataTools
from ome.xml.meta import OMEXMLMetadata

import sys  # BUGFIX: sys.exit() is used below but sys was never imported

# input path template, substituted by whatever generates this script
file = "%s"

options = ImporterOptions()
options.setId(file)
imps = BF.openImagePlus(options)

reader = ImageReader()
omeMeta = MetadataTools.createOMEXMLMetadata()
reader.setMetadataStore(omeMeta)
reader.setId(file)
reader.close()  # metadata is populated during setId; free the file handle

roiCount = omeMeta.getROICount()

# NOTE(review): '> 1' lets zero- and one-ROI images through; confirm that
# multi-ROI images are really the only case that should be skipped.
if roiCount > 1:
    sys.exit(0)

omeMetaStr = omeMeta.dumpXML()
shape = omeMeta.getShapeType(0,0)

# only polygon ROIs are handled beyond this point
if 'Polygon' not in shape:
    sys.exit(0)
예제 #15
0
from ij import IJ
from ij.io import OpenDialog
from loci.formats import ImageReader
from loci.formats import MetadataTools

# open file
od = OpenDialog("Choose a file");
filepath = od.getPath()
print("Image path: " + filepath);

# use bio-formats to extract information
reader = ImageReader()
omeMeta = MetadataTools.createOMEXMLMetadata()
reader.setMetadataStore(omeMeta)
reader.setId(filepath)

seriesCount = reader.getSeriesCount()
print "Series count:",seriesCount
reader.close()


# configuration
# NOTE(review): 'file' shadows the Python 2 builtin; kept as-is.
file = "/path/to/inputFile.tiff"
outFile = "/path/to/outputFile.ome.tiff"

# the number of resolutions in the output file
resolutions = 2

# the scale to be used for the downsampling
scale = 2

# set the tile sizes to be used
tileSizeX = 1024
tileSizeY = 1024

# setup reader
reader = ImageReader()
omeMeta = MetadataTools.createOMEXMLMetadata()
reader.setMetadataStore(omeMeta)
reader.setId(file)

# add resolution metadata: level i+1 is the full size divided by scale^(i+1)
for i in range(resolutions):
    divScale = Math.pow(scale, i + 1)
    omeMeta.setResolutionSizeX(PositiveInteger(int(reader.getSizeX() / divScale)), 0, i + 1)
    omeMeta.setResolutionSizeY(PositiveInteger(int(reader.getSizeY() / divScale)), 0, i + 1)

# setup writer with tiling
writer = PyramidOMETiffWriter()
writer.setMetadataRetrieve(omeMeta)
# setTileSizeX/Y return the tile size actually used — presumably the
# closest size supported by the writer; confirm against the Bio-Formats API
tileSizeX = writer.setTileSizeX(tileSizeX)
tileSizeY = writer.setTileSizeY(tileSizeY)
예제 #17
0
def run():
    """Concatenate single-time-point TIFF stacks into one OME-TIFF.

    Globs '*tif' under `import_dir`, takes the maximum extent in each
    dimension across all files, centres each (possibly smaller) stack inside
    that bounding box with zero padding, and streams the pixel data
    plane-by-plane into `result_dir`/`result_name`, one time point per
    input file.

    NOTE(review): relies on names defined elsewhere in the file
    (import_dir, result_dir, result_name, status, io, datetime, glob, plus
    the Bio-Formats / ImgLib2 classes) -- confirm they are in scope.
    """
    t_start = datetime.now()
    image_paths = glob(os.path.join(str(import_dir.getPath()), '*tif'))

    print '\tread image metadata'
    reader = ImageReader()
    in_meta = MetadataTools.createOMEXMLMetadata()
    reader.setMetadataStore(in_meta)

    # Per-file dimension lists; eff = effective channel count
    # (planes / (Z*T)) and spp = samples per pixel (C / eff).
    x_dims = []
    y_dims = []
    z_dims = []
    c_dims = []
    t_dims = []
    eff = []
    spp = []

    for image_path in image_paths:
        print '\t  parse %s' % (image_path)
        reader.setId(image_path)
        x_dims.append(reader.getSizeX())
        y_dims.append(reader.getSizeY())
        z_dims.append(reader.getSizeZ())
        c_dims.append(reader.getSizeC())
        t_dims.append(reader.getSizeT())
        eff.append(reader.imageCount / z_dims[-1] / t_dims[-1])
        spp.append(reader.getSizeC() / eff[-1])

    # Pixel type / series / dimension order are taken from the LAST file
    # parsed; assumes all inputs share them.  NOTE: 'format' shadows the
    # builtin of the same name.
    format = FormatTools.getPixelTypeString(reader.getPixelType())
    series = reader.getSeries()
    big_endian = Boolean.FALSE
    order = reader.getDimensionOrder()
    reader.close()

    # Compute the dimensions of the output file
    x_dim = max(x_dims)
    y_dim = max(y_dims)
    z_dim = max(z_dims)
    c_dim = max(c_dims)
    t_dim = max(t_dims)

    print '\t  series: %i' % series
    print '\t  format: %s' % format
    print '\t  dimension order: %s' % order
    print '\t  x: %s -> %i' % (x_dims, x_dim)
    print '\t  y: %s -> %i' % (y_dims, y_dim)
    print '\t  z: %s -> %i' % (z_dims, z_dim)
    print '\t  c: %s -> %i' % (c_dims, c_dim)
    print '\t  t: %s -> %i' % (t_dims, t_dim)
    print '\t  effective size c: %s' % eff
    print '\t  samples per pixel: %s' % spp

    # Get the time dimension from the number of input files
    # (overrides the max() computed above: one time point per file).
    t_dim = len(image_paths)

    # TODO: Tried to work out the order with Axes class, got something weird though.
    dimensions = [Short(x_dim), Short(y_dim), Short(c_dim), Short(z_dim)]

    pixels_per_plane = x_dim * y_dim

    # Assemble the metadata for the output file
    out_meta = MetadataTools.createOMEXMLMetadata()
    out_meta.setImageID(MetadataTools.createLSID('Image', series), series)
    out_meta.setPixelsID(MetadataTools.createLSID('Pixels', series), series)
    out_meta.setPixelsBinDataBigEndian(Boolean.TRUE, 0, 0)
    out_meta.setPixelsDimensionOrder(DimensionOrder.fromString(order), series)
    out_meta.setPixelsType(PixelType.fromString(format), series)
    out_meta.setPixelsSizeX(PositiveInteger(x_dim), series)
    out_meta.setPixelsSizeY(PositiveInteger(y_dim), series)
    out_meta.setPixelsSizeZ(PositiveInteger(z_dim), series)
    out_meta.setPixelsSizeC(PositiveInteger(c_dim), series)
    out_meta.setPixelsSizeT(PositiveInteger(t_dim), series)

    for c in range(c_dim):
        out_meta.setChannelID(MetadataTools.createLSID('Channel', series, c),
                              series, c)
        out_meta.setChannelSamplesPerPixel(PositiveInteger(1), series, c)

    # Initialize the BF writer
    result_path = os.path.join(result_dir.getPath(), result_name)
    writer = ImageWriter()
    writer.setMetadataRetrieve(out_meta)
    writer.setId(result_path)
    print '\tcreated to %s' % (result_path)

    # Write the stacks into the output file
    N = len(image_paths)
    for i, image_path in enumerate(image_paths):
        status.showStatus(i, N, "catenating %i of %i time-points" % (i, N))
        print '\t  processing %s' % (image_path)
        ds = io.open(image_path)
        xi = ds.dimensionIndex(Axes.X)
        xv = ds.dimension(xi)
        yi = ds.dimensionIndex(Axes.Y)
        yv = ds.dimension(yi)
        zi = ds.dimensionIndex(Axes.Z)
        zv = ds.dimension(zi)
        ti = ds.dimensionIndex(Axes.TIME)
        tv = ds.dimension(ti)
        ci = ds.dimensionIndex(Axes.CHANNEL)
        cv = ds.dimension(ci)

        # Offsets that centre this stack inside the output bounding box.
        dx = float(x_dim - xv) / 2.0
        dy = float(y_dim - yv) / 2.0
        dz = float(z_dim - zv) / 2.0
        print '\t     translation vector (dx, dy, dz) = (%f, %f, %f)' % (
            dx, dy, dz)

        if (dx != 0) or (dy != 0) or (dz != 0):
            # Translation is (x, y, c, z) to match the X,Y,C,Z interval
            # below; the channel axis gets offset 0.  Out-of-bounds values
            # read as zero after extendZero().
            stk = Views.translate(ds, long(dx), long(dy), long(0), long(dz))
            stk = Views.extendZero(stk)
        else:
            stk = Views.extendZero(ds.getImgPlus().getImg())

        print '\t     writing planes ',
        n = 0
        plane = 1
        byte_array = []
        interval_view = Views.interval(stk, \
                                       [Long(0), Long(0), Long(0), Long(0)], \
                                       [Long(x_dim - 1), Long(y_dim - 1), Long(c_dim - 1), Long(z_dim - 1)])
        cursor = interval_view.cursor()
        while cursor.hasNext():
            n += 1
            cursor.fwd()
            value = cursor.get().getInteger()
            # Each pixel is serialised as a 16-bit value (two bytes).
            bytes = DataTools.shortToBytes(value, big_endian)
            byte_array.extend(bytes)

            # Flush one full plane's worth of bytes to the writer.
            if n == pixels_per_plane:
                writer.saveBytes(plane - 1, byte_array)

                print '.',
                if ((plane) % 10) == 0:
                    print '\n\t                    ',

                byte_array = []
                plane += 1
                n = 0

        print ' '

    writer.close()
    t = datetime.now() - t_start
    print '\twrote %i planes to %s in %i sec.' % (plane - 1, result_path,
                                                  t.total_seconds())
    print '... done.'
import sys

sys.path.append(path.abspath(path.dirname(__file__)))
from functions.czi_structure import get_data_structure, get_binning_factor, open_czi_series, \
    get_maxres_indexes
from functions.image_manipulation import extractChannel

# Hard-coded selection: which pyramid level to open, which channel to save,
# and the target output resolution level.
piramid_to_open = 1
channel_to_save = 3
final_resolution = 5

# Main
if __name__ in ['__builtin__', '__main__']:  # Jython scripts may run as __builtin__
    # get the file
    input_path = IJ.getFilePath("Choose a .czi file")
    reader = ImageReader()
    reader.setId(input_path)
    metadata_list = reader.getCoreMetadataList()
    # slide scanner makes a pyramid of X for every ROI you draw
    # resolution is not updated in the metadata so it needs to be calculated manually
    # NOTE(review): this reader is never closed in the visible fragment.
    number_of_images, num_of_piramids_list = get_data_structure(metadata_list)
    IJ.log("Number of images is " + str(number_of_images))
    # derive the base name from the .czi filename
    file_core_name = path.basename(input_path).split('.czi')[0]
    # get the indexes of the maximum resolution images
    max_res_indexes = get_maxres_indexes(num_of_piramids_list)
    IJ.log("Number of pyramids are " + str(num_of_piramids_list))
    # set names of subimages in the list, waiting to compare to current outputs
    possible_slices = [
        file_core_name + "_slice-" + str(n) for n in range(number_of_images)
    ]
def get_ome_metadata(source, imagenames):
    """Get the stage coordinates and calibration from the ome-xml for a given list of images

    Arguments:
        source {string} -- Path to the images
        imagenames {list} -- list of images filenames

    Returns:
        a tuple that contains:
        dimensions {int} -- number of dimensions (2D or 3D)
        stage_coordinates_x {list} -- the absolute stage x-coordinates from ome-xml metadata
        stage_coordinates_y {list} -- the absolute stage y-coordinates from ome-xml metadata
        stage_coordinates_z {list} -- the absolute stage z-coordinates from ome-xml metadata
        relative_coordinates_x_px {list} -- the relative stage x-coordinates in px
        relative_coordinates_y_px {list} -- the relative stage y-coordinates in px
        relative_coordinates_z_px {list} -- the relative stage z-coordinates in px
        image_calibration {list} -- x,y,z image calibration in unit/px
        calibration_unit {string} -- image calibration unit
        image_dimensions_czt {list} -- number of images in dimensions c,z,t
    """

    # open an array to store the absolute stage coordinates from metadata
    stage_coordinates_x = []
    stage_coordinates_y = []
    stage_coordinates_z = []

    for counter, image in enumerate(imagenames):

        # parse metadata (a fresh reader per image, closed again below)
        reader = ImageReader()
        omeMeta = MetadataTools.createOMEXMLMetadata()
        reader.setMetadataStore(omeMeta)
        reader.setId(source + str(image))

        # get hyperstack dimensions from the first image
        if counter == 0:
            frame_size_x = reader.getSizeX()
            frame_size_y = reader.getSizeY()
            frame_size_z = reader.getSizeZ()
            frame_size_c = reader.getSizeC()
            frame_size_t = reader.getSizeT()

            # note the dimensions
            if frame_size_z == 1:
                dimensions = 2
            if frame_size_z > 1:
                dimensions = 3

            # get the physical calibration for the first image series
            physSizeX = omeMeta.getPixelsPhysicalSizeX(0)
            physSizeY = omeMeta.getPixelsPhysicalSizeY(0)
            physSizeZ = omeMeta.getPixelsPhysicalSizeZ(0)

            # workaround to get the z-interval if physSizeZ.value() returns None.
            z_interval = 1
            if physSizeZ is not None:
                z_interval = physSizeZ.value()

            # recover the z-step from plane positions when the metadata has
            # no explicit z calibration
            if frame_size_z > 1 and physSizeZ is None:
                print "no z calibration found, trying to recover"
                first_plane = omeMeta.getPlanePositionZ(0, 0)
                # NOTE(review): plane index of the next z-slice -- presumably
                # assumes a fixed dimension order; confirm for other orders.
                next_plane_imagenumber = frame_size_c + frame_size_t - 1
                second_plane = omeMeta.getPlanePositionZ(
                    0, next_plane_imagenumber)
                z_interval = abs(
                    abs(first_plane.value()) - abs(second_plane.value()))
                print "z-interval seems to be: ", z_interval

            # create an image calibration
            # NOTE(review): raises AttributeError if physSizeX/physSizeY is
            # None -- unlike z, no fallback exists for x/y.
            image_calibration = [
                physSizeX.value(),
                physSizeY.value(), z_interval
            ]
            calibration_unit = physSizeX.unit().getSymbol()
            image_dimensions_czt = [frame_size_c, frame_size_z, frame_size_t]

        reader.close()

        # get the plane position in calibrated units (the OME-XML store
        # remains readable after the reader is closed)
        current_position_x = omeMeta.getPlanePositionX(0, 0)
        current_position_y = omeMeta.getPlanePositionY(0, 0)
        current_position_z = omeMeta.getPlanePositionZ(0, 0)

        # get the absolute stage positions and store them
        pos_x = current_position_x.value()
        pos_y = current_position_y.value()

        if current_position_z is None:
            print "the z-position is missing in the ome-xml metadata."
            pos_z = 1.0
        else:
            pos_z = current_position_z.value()

        stage_coordinates_x.append(pos_x)
        stage_coordinates_y.append(pos_y)
        stage_coordinates_z.append(pos_z)

    # calculate the store the relative stage movements in px (for the grid/collection stitcher)
    relative_coordinates_x_px = []
    relative_coordinates_y_px = []
    relative_coordinates_z_px = []

    # positions relative to the first image, converted from calibrated units
    # to pixels using the first image's calibration
    for i in range(len(stage_coordinates_x)):
        rel_pos_x = (stage_coordinates_x[i] -
                     stage_coordinates_x[0]) / physSizeX.value()
        rel_pos_y = (stage_coordinates_y[i] -
                     stage_coordinates_y[0]) / physSizeY.value()
        rel_pos_z = (stage_coordinates_z[i] -
                     stage_coordinates_z[0]) / z_interval

        relative_coordinates_x_px.append(rel_pos_x)
        relative_coordinates_y_px.append(rel_pos_y)
        relative_coordinates_z_px.append(rel_pos_z)

    return (dimensions, stage_coordinates_x, stage_coordinates_y,
            stage_coordinates_z, relative_coordinates_x_px,
            relative_coordinates_y_px, relative_coordinates_z_px,
            image_calibration, calibration_unit, image_dimensions_czt)
def initreader(vsi_path):
    """Open *vsi_path* with a Bio-Formats ImageReader and return it.

    An OME-XML metadata store is attached before setId() so the returned
    reader's metadata (via getMetadataStore()) is fully populated.
    """
    rdr = ImageReader()
    rdr.setMetadataStore(MetadataTools.createOMEXMLMetadata())
    rdr.setId(vsi_path)
    return rdr
예제 #21
0
def process_time_points(root, files, outdir):
	'''Concatenate images and write ome.tiff file. If image contains already multiple time points just copy the image'''
	concat = 1
	files.sort()
	options = ImporterOptions()
	options.setId(files[0])
	options.setVirtual(1)
	image = BF.openImagePlus(options)
	image = image[0]
	if image.getNFrames() > 1:
		IJ.log(files[0] + " Contains multiple time points. Can only concatenate single time points! Don't do anything!")
		image.close()
		return
	
	width  = image.getWidth()
	height = image.getHeight()
	for patt in pattern:
		outName = re.match(patt, os.path.basename(files[0]))
		if outName is None:
			continue
		if outdir is None:
			outfile = os.path.join(root, outName.group(1) + '.ome.tif')
		else:
			outfile =  os.path.join(outdir, outName.group(1) + '.ome.tif')
		reader = ImageReader()
		reader.setMetadataStore(MetadataTools.createOMEXMLMetadata())
		reader.setId(files[0])
		timeInfo = []
		omeOut = reader.getMetadataStore()
		omeOut = setUpXml(omeOut, image, files)
		reader.close()
		image.close()
		IJ.log ('Concatenates ' + os.path.join(root, outName.group(1) + '.ome.tif'))
		itime = 0
		try:
			for ifile, fileName in enumerate(files):
				print fileName
				omeMeta = MetadataTools.createOMEXMLMetadata()
	
				reader.setMetadataStore(omeMeta)
				reader.setId(fileName)
				#print omeMeta.getPlaneDeltaT(0,0)
				#print omeMeta.getPixelsTimeIncrement(0)
				
				if fileName.endswith('.czi'):
					if ifile == 0:
						T0 = omeMeta.getPlaneDeltaT(0,0).value()
					dT = omeMeta.getPlaneDeltaT(0,0).value() - T0
					unit =  omeMeta.getPlaneDeltaT(0,0).unit()
				else:
					timeInfo.append(getTimePoint(reader, omeMeta))
	 				unit = omeMeta.getPixelsTimeIncrement(0).unit()
					try:
						dT = round(timeInfo[files.index(fileName)]-timeInfo[0],2)
					except:
						dT = (timeInfo[files.index(fileName)]-timeInfo[0]).seconds
				
				nrImages = reader.getImageCount()
	
	
				for i in range(0, reader.getImageCount()):
	
					try:
						omeOut.setPlaneDeltaT(dT, 0, i + itime*nrImages)
					except TypeError:
						omeOut.setPlaneDeltaT(Time(dT, unit),0, i + itime*nrImages)
					omeOut.setPlanePositionX(omeOut.getPlanePositionX(0,i), 0, i + itime*nrImages)
					omeOut.setPlanePositionY(omeOut.getPlanePositionY(0,i), 0, i + itime*nrImages)
					omeOut.setPlanePositionZ(omeOut.getPlanePositionZ(0,i), 0, i + itime*nrImages)
					omeOut.setPlaneTheC(omeOut.getPlaneTheC(0,i), 0, i + itime*nrImages)
					omeOut.setPlaneTheT(NonNegativeInteger(itime), 0, i + itime*nrImages)
					omeOut.setPlaneTheZ(omeOut.getPlaneTheZ(0,i), 0, i + itime*nrImages)
				itime = itime + 1
				reader.close()
	
				IJ.showProgress(files.index(fileName), len(files))
			try:
				incr = float(dT/(len(files)-1))
			except:
				incr = 0
			
			try:
				omeOut.setPixelsTimeIncrement(incr, 0)
			except TypeError:
				#new Bioformats >5.1.x
				omeOut.setPixelsTimeIncrement(Time(incr, unit),0)
			
			outfile = concatenateImagePlus(files, outfile)
			if outfile is not None:
				filein = RandomAccessInputStream(outfile)
				fileout = RandomAccessOutputStream(outfile)
				saver = TiffSaver(fileout, outfile)
				saver.overwriteComment(filein,omeOut.dumpXML())
				fileout.close()
				filein.close()
	
	
		except:
			traceback.print_exc()
		finally:
			#close all possible open files
			try:
				reader.close()
			except:
				pass
			try:
				filein.close()
			except:
				pass
			try:
				fileout.close()
			except:
@String(label="Base Directory", style="") str_dir
@String(label="Image Title", style="") str_img_nam
@String(label="Image Extension", style="", value=".tif") str_img_ext
# The three @String lines above are ImageJ script parameters, harvested by
# the script framework before this Jython code runs.
file = str_dir + "/" + str_img_nam + str_img_ext
# read in and display ImagePlus object(s)
from loci.plugins import BF
from loci.formats import ImageReader
from loci.formats import MetadataTools
from ij import IJ
from ome.units import UNITS

imps = BF.openImagePlus(file)
for imp in imps:
    imp.show()
    # Re-open the file with a plain reader to populate the OME-XML store.
    # NOTE(review): this re-parses the same file once per ImagePlus.
    reader = ImageReader()
    omeMeta = MetadataTools.createOMEXMLMetadata()
    reader.setMetadataStore(omeMeta)
    reader.setId(file)
    seriesCount = reader.getSeriesCount()
    reader.close()
    # print out series count from two different places (they should always match!)
    imageCount = omeMeta.getImageCount()
    IJ.log("Total # of image series (from BF reader): " + str(seriesCount))
    IJ.log("Total # of image series (from OME metadata): " + str(imageCount))
    physSizeX = omeMeta.getPixelsPhysicalSizeX(0)
    physSizeY = omeMeta.getPixelsPhysicalSizeY(0)
    physSizeZ = omeMeta.getPixelsPhysicalSizeZ(0)
    IJ.log("Physical calibration:")
    # NOTE(review): UNITS.MICROM is the pre-5.1 constant name (newer
    # Bio-Formats uses UNITS.MICROMETER) -- confirm against the version in
    # use.  physSizeY/physSizeZ are read but never logged; the fragment
    # appears truncated here.
    if (physSizeX is not None):
    	IJ.log("\tX = " + str(physSizeX.value()) + " " + physSizeX.unit().getSymbol()
		+ " = " + str(physSizeX.value(UNITS.MICROM)) + " microns")
예제 #23
0
# NOTE(review): 'r' (a Bio-Formats reader), 'filePath', ImageStack, ImagePlus
# and IJ are defined/imported earlier in the file -- confirm they are in scope.
num = r.getImageCount()
width = r.getSizeX()
height = r.getSizeY()
md = r.getGlobalMetadata()
# print(type(md))
# print(num, width, height)
# Build a single-slice ImageJ stack from the first plane of the open reader.
stack = ImageStack(width, height)
i = 0
# openProcessors(i) returns the ImageProcessor(s) for plane i; take the first.
ip = r.openProcessors(i)[0]
stack.addSlice("1", ip);
imp = ImagePlus("foo", stack);
r.close()
imp.show()
IJ.run("Enhance Contrast", "saturated=0.35")

# Re-open the file with a metadata store to query the physical pixel size.
imageReader = ImageReader()
meta = MetadataTools.createOMEXMLMetadata()
imageReader.setMetadataStore(meta)
imageReader.setId(filePath)
pSizeX = meta.getPixelsPhysicalSizeX(0)
pSizeY = meta.getPixelsPhysicalSizeY(0)
imageReader.close()
print(pSizeX, pSizeY)
print(meta.getPixelsSizeX(0))
print(meta.getPixelsSizeY(0))





예제 #24
0
def get_lif_series_length(fpath):
  """Return the number of image series in the file at *fpath*.

  Uses a throw-away Bio-Formats ImageReader.  The reader is always closed,
  even when setId() raises -- the original version leaked it.
  """
  reader = ImageReader()
  try:
    reader.setId(fpath)
    return reader.getSeriesCount()
  finally:
    reader.close()
예제 #25
0
def main():
    """Estimate flat-field/dark-field shading profiles with BaSiC.

    Opens every series of `filename` one channel at a time, runs the BaSiC
    shading-estimation plugin per channel, and saves the assembled
    multi-channel flat-field ('ffp') and dark-field ('dfp') profile images
    as 32-bit TIFFs under `output_dir`.

    NOTE(review): lambda_flat, lambda_dark, filename, output_dir and
    experiment_name are script parameters supplied outside this block.
    """

    Interpreter.batchMode = True

    # XOR: exactly one of the two lambdas being zero is an invalid mix --
    # they must both be automatic (0) or both manual (non-zero).
    if (lambda_flat == 0) ^ (lambda_dark == 0):
        print ("ERROR: Both of lambda_flat and lambda_dark must be zero,"
               " or both non-zero.")
        return
    lambda_estimate = "Automatic" if lambda_flat == 0 else "Manual"

    print "Loading images..."

    # Use BioFormats reader directly to determine dataset dimensions without
    # reading every single image. The series count (num_images) is the one value
    # we can't easily get any other way, but we might as well grab the others
    # while we have the reader available.
    bfreader = ImageReader()
    bfreader.id = str(filename)
    num_images = bfreader.seriesCount
    num_channels = bfreader.sizeC
    width = bfreader.sizeX
    height = bfreader.sizeY
    bfreader.close()

    # The internal initialization of the BaSiC code fails when we invoke it via
    # scripting, unless we explicitly set a the private 'noOfSlices' field.
    # Since it's private, we need to use Java reflection to access it.
    Basic_noOfSlices = Basic.getDeclaredField('noOfSlices')
    Basic_noOfSlices.setAccessible(True)
    basic = Basic()
    Basic_noOfSlices.setInt(basic, num_images)

    # Pre-allocate the output profile images, since we have all the dimensions.
    ff_image = IJ.createImage("Flat-field", width, height, num_channels, 32);
    df_image = IJ.createImage("Dark-field", width, height, num_channels, 32);

    print("\n\n")

    # BaSiC works on one channel at a time, so we only read the images from one
    # channel at a time to limit memory usage.
    for channel in range(num_channels):
        print "Processing channel %d/%d..." % (channel + 1, num_channels)
        print "==========================="

        options = ImporterOptions()
        options.id = str(filename)
        options.setOpenAllSeries(True)
        # concatenate=True gives us a single stack rather than a list of
        # separate images.
        options.setConcatenate(True)
        # Limit the reader to the channel we're currently working on. This loop
        # is mainly why we need to know num_images before opening anything.
        for i in range(num_images):
            options.setCBegin(i, channel)
            options.setCEnd(i, channel)
        # openImagePlus returns a list of images, but we expect just one (a
        # stack).
        input_image = BF.openImagePlus(options)[0]

        # BaSiC seems to require the input image is actually the ImageJ
        # "current" image, otherwise it prints an error and aborts.
        WindowManager.setTempCurrentImage(input_image)
        basic.exec(
            input_image, None, None,
            "Estimate shading profiles", "Estimate both flat-field and dark-field",
            lambda_estimate, lambda_flat, lambda_dark,
            "Ignore", "Compute shading only"
        )
        input_image.close()

        # Copy the pixels from the BaSiC-generated profile images to the
        # corresponding channel of our output images.
        # NOTE(review): input_image.title is read after close() -- the title
        # is still accessible on a closed ImagePlus, but confirm.
        ff_channel = WindowManager.getImage("Flat-field:%s" % input_image.title)
        ff_image.slice = channel + 1
        ff_image.getProcessor().insert(ff_channel.getProcessor(), 0, 0)
        ff_channel.close()
        df_channel = WindowManager.getImage("Dark-field:%s" % input_image.title)
        df_image.slice = channel + 1
        df_image.getProcessor().insert(df_channel.getProcessor(), 0, 0)
        df_channel.close()

        print("\n\n")

    # Save the two assembled profile images as '<experiment>-ffp.tif' and
    # '<experiment>-dfp.tif'.
    template = '%s/%s-%%s.tif' % (output_dir, experiment_name)
    ff_filename = template % 'ffp'
    IJ.saveAsTiff(ff_image, ff_filename)
    ff_image.close()
    df_filename = template % 'dfp'
    IJ.saveAsTiff(df_image, df_filename)
    df_image.close()

    print "Done!"
	return n

### END UTILITY FUNCTIONS

# NOTE(review): sipmm_outputFile / sipmm_inputFile, 'debugging' and 'log'
# are script parameters / helpers defined earlier in the file.
outdir = sipmm_outputFile.getAbsolutePath()

#imps = BF.openImagePlus()

from ij import IJ
if debugging:
	IJ.run("Close All")

from loci.formats import ImageReader
from loci.formats import MetadataTools
from loci.plugins.in import ImporterOptions
# Count the series in the input file; the reader is closed right away.
reader = ImageReader()
omeMeta = MetadataTools.createOMEXMLMetadata()
reader.setMetadataStore(omeMeta)
reader.setId(sipmm_inputFile.getAbsolutePath())
seriesCount = reader.getSeriesCount()
reader.close()

log('Found {} series'.format(seriesCount))

# Append the CSV header to the results file.
# NOTE(review): mode 'a' appends, so re-running adds a duplicate header row.
outfile = os.path.join(outdir,'results.csv')
h = 'Name,path,Rarea,Rmean,Rstd,Garea,Gmean,Gstd,GQarea,GQmean,GQintden,GQstd,nPunctae,RMregions,maxxxxx'
with open(outfile,'a') as of:
		of.write(h+'\n')

# Per-series analysis loop (its body continues beyond this fragment).
for impi in range(seriesCount):
	log('Analyzing series {}/{}...'.format(impi+1,seriesCount))
예제 #27
0
    if mipPrefJPG:
        FileSaver(img).saveAsJpeg(file_path + ".jpg")


# Walk inDir, open every file with extension fileExt via Bio-Formats with
# channel splitting, and derive a per-channel output name.
# NOTE(review): inDir, fileExt, imageCount, logging, ImporterOptions, BF,
# ImageReader, MetadataTools and re come from earlier in the file; the loop
# body appears truncated at the end of this fragment.
for root, dirs, files in os.walk(inDir):
    for file in files:
        if file.endswith(fileExt):
            logging.info('Starting image #%i (%s)', imageCount, str(file))
            options = ImporterOptions()
            options.setAutoscale(True)
            options.setId(os.path.join(root, file))
            options.setSplitChannels(True)
            imps = BF.openImagePlus(options)
            imageCount += 1
            for imp in imps:
                # Re-read the OME-XML metadata once per split channel.
                reader = ImageReader()
                omeMeta = MetadataTools.createOMEXMLMetadata()
                reader.setMetadataStore(omeMeta)
                reader.setId(os.path.join(root, file))

                # Split-channel titles contain 'C=<n>'; recover the channel
                # index and map it to the channel name from the metadata.
                filename = str(imp)
                channel_id = int(re.findall("C=(\d)", filename)[0])
                channel_name = omeMeta.getChannelName(0, channel_id)
                out_name = filename.split('"')[1]
                out_name = out_name.split(fileExt)[0] + "_" + str(channel_name)
                out_name = out_name.replace(" ", "")

                physSizeX = omeMeta.getPixelsPhysicalSizeX(0)
                physSizeY = omeMeta.getPixelsPhysicalSizeY(0)
                physSizeZ = omeMeta.getPixelsPhysicalSizeZ(0)
                stackSizeX = omeMeta.getPixelsSizeX(0).getValue()
예제 #28
0
def process(filename):
	"""Generate a methods-section blurb describing how *filename* was acquired.

	Reads the OME-XML metadata (instrument, objective, pixel size, channel
	wavelengths, exposure times) via Bio-Formats and fills in the sentence
	templates below.  Falls back to format-specific global-metadata keys for
	Nikon files where the OME fields are missing.  Returns the assembled
	blurb string.

	NOTE(review): relies on 'logger', 'context' and 'omeservice' script
	parameters defined outside this block.
	"""
	TEMPLATE_GENERAL 		= "The data was acquired on a {ID} microscope, using a {objective} {NA} NA objective. The pixel size was {pxx_microns} microns. "
	TEMPLATE_CHANNEL		= "The excitation and emission wavelengths for channel {ch} were {ex} and {em} and the {exposureTime} was {et}. "
	TEMPLATE_3D				= "A series of slices was collected with a step size of {pzz_microns} microns. "
	TEMPLATE_TIME			= "Images were acquired with a time interval of {timeInterval}. "
	
	BLURB = ""
	
	# Admin stuff
	import sys
	
	from org.scijava.ui.swing.console import LoggingPanel
	logger.addLogListener( LoggingPanel(context) );
	
	logger.info(filename.getAbsolutePath())
	
	# Get a BioFormats reader
	from loci.formats import ImageReader
	ir = ImageReader()
	
	# Adapted from https://github.com/ome/bioformats/blob/develop/components/formats-gpl/utils/GetPhysicalMetadata.java
	m = omeservice.createOMEXMLMetadata()
	
	ir.setMetadataStore(m)
	ir.setId(filename.getAbsolutePath())
	
	# Some checks
	ninstruments 	= m.getInstrumentCount()
	if ninstruments > 1:
		logger.error("More than one instrument found. Automatic generation will not work...")
	if ninstruments == 0:
		logger.error("No instrument metadata found! Automatic generation will not work...")
	
	# Manufacturer and modalities
	try:
		ID = m.getMicroscopeManufacturer(0)
	except:
		logger.error(sys.exc_info()[0])
		ID = None
		
	# Fall back to guessing the manufacturer from the file format name.
	# NOTE(review): 'ff' is only assigned on this fallback path; the later
	# "in ff" checks raise NameError when the manufacturer came from
	# metadata -- pre-existing issue, left as-is.
	if ID == None:
		ff = str(ir.getFormat())
		if "Zeiss" in ff:
			ID="Zeiss"
		elif "Nikon" in ff:
			ID="Nikon"
	
			tID = ir.getMetadataValue("m_sMicroscopePhysFullName")
			if tID is not None:
				ID = tID
				
		elif "Olympus" in ff:
			ID="Olympus"
		else:
			ID=""
	
	# Acquisition modality: warn if channels disagree, keep the last one.
	# NOTE(review): 'mode' is unbound if the image has zero channels.
	for ic in range(ir.getSizeC()):
		mode = m.getChannelAcquisitionMode(0,ic)
	
		if ic>0 and mode != mode0:
			logger.warn("WARNING : Not all channels were acquired with the same modality..")
		else:
			mode0=mode
	
	# Turn CamelCase modality names into lower-case words ("TIRF" excepted).
	if mode == None:
		mode_with_spaces = "UNKNOWN"
	else:
		mode_with_spaces = ""
		if str(mode) == "TIRF":
			mode_with_spaces = str(mode)
		else:
			for letter in str(mode):
				if letter.isupper():
					mode_with_spaces += " "+letter.lower()
				else:
					mode_with_spaces += letter
	
	ID+=" "+str(mode_with_spaces.strip())
	
	if ninstruments == 1:
		nobjectives		= m.getObjectiveCount(0)
		if nobjectives > 1:
			logger.error("More than one objective found. Automatic generation will generate information for the first objective only.")
	
	# Objective magnification (e.g. "60x"), from OME metadata if available.
	objective = "UNKNOWN"
	if ninstruments == 1 and nobjectives >0:
		try:
			magnification1 	= m.getObjectiveNominalMagnification(0,0)
	
			if magnification1 != None:
				objective = "{:.0f}x".format(magnification1)
		except:
			logger.error(sys.exc_info()[0])
			msg = "Could not extract information about the objective! The image might be missing some crucial metadata."
			logger.error(msg)
	
	# Nikon fallback: objective name from the ND2 global metadata.
	if objective == "UNKNOWN":
		if "Nikon" in ff:
			objective0 = str(ir.getMetadataValue("sObjective"))
			if objective0 is not None:
				objective = objective0
			
	
	# Numerical aperture, with a Nikon global-metadata fallback.
	NA = "UNKNOWN"
	if ninstruments == 1 and nobjectives >0:
		try:
			NA1 = m.getObjectiveLensNA(0,0)
			
			if NA1 != None:
				NA = str(NA1)
		except:
			msg = "Could not extract information about the objective! The image might be missing some crucial metadata."
			logger.error(msg)
				
	NAm = ir.getMetadataValue("Numerical Aperture")
	if NA=="UNKNOWN" and "Nikon" in ff and NAm is not None:
		NA = str(NAm)
	#else:
	#	HT=ir.getGlobalMetadata()
	#	for k in HT.keys():
	#		print "{}={}".format(k,HT.get(k))
	
	# Pixel size
	nimages = m.getImageCount()
	logger.info("Found {} images".format(nimages))
	
	from ome.units import UNITS
	
	pxx_microns = "UNKNOWN"
	if ninstruments==1 and nobjectives>0:
		try:
			pxx_microns = "{:.2f}".format(m.getPixelsPhysicalSizeX(0).value(UNITS.MICROMETER))
		except:
			logger.error(sys.exc_info()[0])
			msg = "Could not extract physical pixel size! The image might be missing some crucial metadata."
			logger.error(msg)
	
	# Is it 3D?
	is3D = ir.getSizeZ()>1
	
	pzz_microns = "UNKNOWN"
	
	if ninstruments==1 and nobjectives>0:
		try:
			pzz_microns = "{:.2f}".format(m.getPixelsPhysicalSizeZ(0).value(UNITS.MICROMETER))
		except:
			logger.error(sys.exc_info()[0])
			msg = "This image is 3D but I could not extract physical step size! The image might be missing some crucial metadata."
			logger.error(msg)
	
	
	
	# TODO Is it a time series?
	
	# GENERAL BLURB GENERATION
	BLURB += TEMPLATE_GENERAL.format(ID=ID, objective=objective, NA=NA, pxx_microns=pxx_microns)
	if is3D:
		BLURB += TEMPLATE_3D.format(pzz_microns=pzz_microns)
	
	# Extract channel information (excitation/emission wavelengths in nm).
	for ic in range(ir.getSizeC()):
		try:
			ex0 = m.getChannelExcitationWavelength(0,ic)
			
			if ex0==None:
				ex = "UNKNOWN"
			else:
				ex="{:.0f} nm".format(ex0.value(UNITS.NANOMETER))
		except:
			logger.error(sys.exc_info()[0])
			logger.error("Wasn't able to extract channel wavelength information for channel {}.".format(ic+1))
			continue
	
		try:
			em0 = m.getChannelEmissionWavelength(0,ic)
			
			if em0==None:
				em = "UNKNOWN"
			else:
				em="{:.0f} nm".format(em0.value(UNITS.NANOMETER))
		except:
			logger.error(sys.exc_info()[0])
			logger.error("Wasn't able to extract channel wavelength information for channel {}.".format(ic+1))
			continue
	
		#try:
		ix = ir.getIndex(0, ic, 0)		# NOTE : First z plane, first timepoint only
		et = m.getPlaneExposureTime(0,ix)
	
		if et==None:
			et = "UNKNOWN"
		else:
			etms = et.value(UNITS.MILLISECOND)

			if "CZI" in ff: # TODO Check if error is across other images
				logger.warn("The exposure time was divided by 1000 to account for ms mistaken as s in CZI files")
				
				etms = etms/1000
				
			if etms<1000:
				et=str("{:.2f} ms".format(etms))
			else:
				et=str("{} s".format(etms/1000))
	
				if etms/1000>600:
					logger.warn("Exposure time for channel {} is {}s. That's longer than 10m, please double check metadata to make sure it's correct".format(ic+1,etms/1000))
	
		BLURB += TEMPLATE_CHANNEL.format(ch=ic+1, ex=ex, exposureTime="exposure time", et=et, em=em)
		#except:
		#	logger.error("Wasn't able to extract channel {} exposure time information.".format(ic+1))

	return BLURB
예제 #29
0
def get_reader(file, complete_meta):
	"""Return a Bio-Formats ImageReader initialised on *file*.

	The caller-supplied *complete_meta* store is attached before setId(),
	so it is populated while the file is parsed.
	"""
	bf_reader = ImageReader()
	bf_reader.setMetadataStore(complete_meta)
	bf_reader.setId(file)
	return bf_reader
from loci.common.image import SimpleImageScaler
from ome.xml.model.primitives import PositiveInteger
from java.lang import Math
from ij import IJ
import math

# configuration
file = "/path/to/inputFile.tiff"          # NOTE: shadows the 'file' builtin
outFile = "/path/to/outputFile.ome.tiff"
resolutions = 3                           # pyramid levels (unused in this fragment)
scale = 2                                 # downsampling factor (unused in this fragment)
tileSizeX = 512
tileSizeY = 512

# setup image reader and writer
# NOTE(review): OMETiffWriter is not imported in the visible part of the
# file -- presumably loci.formats.out.OMETiffWriter; confirm.
reader = ImageReader()
omeMeta = MetadataTools.createOMEXMLMetadata()
reader.setMetadataStore(omeMeta)
reader.setId(file)

# The writer reuses the reader's OME-XML metadata so the output keeps the
# input's dimensions/calibration; tiles are written in 512x512 chunks.
writer = OMETiffWriter()
writer.setMetadataRetrieve(omeMeta)
writer.setInterleaved(reader.isInterleaved())
writer.setTileSizeX(tileSizeX)
writer.setTileSizeY(tileSizeY)
writer.setId(outFile)

# convert to OME-TIFF using tiled reading and writing
for series in range(reader.getSeriesCount()):
    reader.setSeries(series)
    writer.setSeries(series)