Example #1
def get_calibration_from_metadata(path_to_image):
    """get the pixel calibration from a given image using Bio-Formats

    Parameters
    ----------
    path_to_image : str
        full path to the input image

    Returns
    -------
    list
        the physical pixel size as a float for x, y, z
    """
    reader = ImageReader()
    omeMeta = MetadataTools.createOMEXMLMetadata()
    reader.setMetadataStore(omeMeta)
    reader.setId(str(path_to_image))

    physSizeX = omeMeta.getPixelsPhysicalSizeX(0)
    physSizeY = omeMeta.getPixelsPhysicalSizeY(0)
    physSizeZ = omeMeta.getPixelsPhysicalSizeZ(0)
    image_calibration = [physSizeX.value(), physSizeY.value(), physSizeZ.value()]
    reader.close()

    return image_calibration
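
A minimal usage sketch for the function above, assuming it runs in the Fiji/Jython script editor with Bio-Formats available; the file path is hypothetical and should point to a z-stack with physical calibration:

from loci.formats import ImageReader
from loci.formats import MetadataTools

calibration = get_calibration_from_metadata("/path/to/stack.ome.tif")  # hypothetical path
print("physical pixel size (x, y, z): " + str(calibration))
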
def DirList(baseDir):
    r = ImageReader()
    imgStats = {}
    for root, dirs, files in os.walk(str(baseDir)):
        for f1 in files:
            if f1.endswith(".jpg") or f1.endswith(".jpe") or f1.endswith(
                    ".jpeg"):
                id = root + "/" + f1
                try:
                    r.setId(id)
                except Exception:
                    # Bio-Formats throws if the file cannot be opened
                    print "Couldn't open image from file:", id
                    continue
                w = r.getSizeX()
                h = r.getSizeY()
                imgStats[str(w) + "_" +
                         str(h)] = imgStats.get(str(w) + "_" + str(h), 0) + 1
                IJ.log("Found image: " + str(id))
                #counter += 1
    r.close()
    #print summary
    summary = ''
    for k, v in imgStats.iteritems():
        dim = k.split("_")
        ratio = float(dim[0]) / float(dim[1])
        IJ.log("Found " + str(v) + " images of dimension " + str(dim[0]) +
               "x" + str(dim[1]) + " apect ratio " + str(round(ratio, 2)))
        summary = summary + "\nFound " + str(
            v) + " images of dimension " + str(dim[0]) + "x" + str(
                dim[1]) + " apect ratio " + str(round(ratio, 2))
    return summary
def get_reader(file, inputMeta):
	options = ImporterOptions()
	options.setId(file)
	imps = BF.openImagePlus(options)
	reader = ImageReader()
	reader.setMetadataStore(inputMeta)
	reader.setId(file)
	return reader
def Z1_metadata(sourcefile):
    # Access the header of Z1 lightsheet data to determine the number of views
    reader = ImageReader()
    omeMeta = MetadataTools.createOMEXMLMetadata()
    reader.setMetadataStore(omeMeta)
    reader.setId(sourcefile)
    seriesCount = reader.getSeriesCount()
    reader.close()
    return seriesCount
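
A hedged usage sketch for Z1_metadata (the .czi path below is hypothetical):

n_views = Z1_metadata("/data/sample_lightsheet.czi")  # hypothetical path
print("Number of views (series) in the file: " + str(n_views))
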
Example #6
def getFrameIntervalFromImage(image_path):
    from loci.formats import ImageReader
    from loci.formats import MetadataTools
    r = ImageReader()
    meta = MetadataTools.createOMEXMLMetadata()
    r.setMetadataStore(meta)
    r.setId(image_path)
    frame_interval = meta.getPixelsTimeIncrement(0).value()
    log("Detected frame rate: %s (%s)" % (frame_interval, image_path))
    return frame_interval
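
A hedged usage sketch for getFrameIntervalFromImage; the path is hypothetical, and log() is assumed to be defined elsewhere in the original script (IJ.log works as a stand-in):

from ij import IJ
log = IJ.log  # stand-in for the log() helper the function expects

interval = getFrameIntervalFromImage("/path/to/timelapse.ome.tif")  # hypothetical path
print("Frame interval: " + str(interval))
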
Example #8
def meta_parser():
    """ Iterates through .lif XML/OME metadata, returns selected values eg. timepoints, channels, series count, laser power.. """

    # Get metadata.
    reader = ImageReader()
    omeMeta = MetadataTools.createOMEXMLMetadata()
    reader.setMetadataStore(omeMeta)
    reader.setId(str(Experiment))

    # Extracts number of image series, channel number
    seriesCount = reader.getSeriesCount()
    channels = reader.getSizeC()
    #reader.close()

    # Number of images
    imageCount = omeMeta.getImageCount()

    # Image size in pixels AND microns (for scalebar).
    Physical_x = omeMeta.getPixelsPhysicalSizeX(0)
    Pixel_x = omeMeta.getPixelsSizeX(0)
    Physical_x = Physical_x.value()
    Pixel_x = Pixel_x.getNumberValue()

    # Assumes square image (x=y).
    org_size = (Physical_x*Pixel_x)*2

    # Laser power of donor excitation laser.
    if channels == 3:
        LP = omeMeta.getChannelLightSourceSettingsAttenuation(0,0)
        LP = 1 - LP.getNumberValue()
    else:
        LP = 0



    timelist = []
    for timepoint in range (imageCount):
        times = omeMeta.getImageAcquisitionDate(timepoint)
        timelist.append(times.toString())
	
	
    # YY.MM... to minutes.
    timelist =[ time.mktime(time.strptime(times, u'%Y-%m-%dT%H:%M:%S')) for times in timelist ]
    timelist_unsorted =[ (times - timelist[0])/60 for times in timelist ]

    timelist = sorted(timelist_unsorted)

    # Prints to log.
    IJ.log("Total # of image series (from BF reader): " + str(seriesCount))
    IJ.log("Total # of image series (from OME metadata): " + str(imageCount))
    IJ.log("Total # of channels (from OME metadata): " + str(channels))
    IJ.log("Laserpower (from OME metadata): " + str(LP))
    return channels, seriesCount, timelist, timelist_unsorted, LP, org_size
Example #9
def get_metadata(imagefile, imageID=0):

    metainfo = {}

    # initialize the reader and get the OME metadata
    reader = ImageReader()
    omeMeta = MetadataTools.createOMEXMLMetadata()
    metainfo['ImageCount_OME'] = omeMeta.getImageCount()
    reader.setMetadataStore(omeMeta)
    reader.setId(imagefile)
    metainfo['SeriesCount_BF'] = reader.getSeriesCount()
    reader.close()

    # read dimensions TZCXY from OME metadata
    metainfo['SizeT'] = omeMeta.getPixelsSizeT(imageID).getValue()
    metainfo['SizeZ'] = omeMeta.getPixelsSizeZ(imageID).getValue()
    metainfo['SizeC'] = omeMeta.getPixelsSizeC(imageID).getValue()
    metainfo['SizeX'] = omeMeta.getPixelsSizeX(imageID).getValue()
    metainfo['SizeY'] = omeMeta.getPixelsSizeY(imageID).getValue()

    # store info about stack
    if metainfo['SizeZ'] == 1:
        metainfo['is3d'] = False
    elif metainfo['SizeZ'] > 1:
        metainfo['is3d'] = True

    # get the scaling for XYZ
    physSizeX = omeMeta.getPixelsPhysicalSizeX(0)
    physSizeY = omeMeta.getPixelsPhysicalSizeY(0)
    physSizeZ = omeMeta.getPixelsPhysicalSizeZ(0)

    if physSizeX is not None:
        metainfo['ScaleX'] = round(physSizeX.value(), 3)
        metainfo['ScaleY'] = round(physSizeY.value(), 3)
    if physSizeX is None:
        metainfo['ScaleX'] = None
        metainfo['ScaleY'] = None

    if physSizeZ is not None:
        metainfo['ScaleZ'] = round(physSizeZ.value(), 3)
    if physSizeZ is None:
        metainfo['ScaleZ'] = None

    # sort the dictionary
    metainfo = OrderedDict(sorted(metainfo.items()))

    return metainfo
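
A minimal usage sketch, assuming the same loci.formats imports as the other examples and that OrderedDict was imported from collections; the path is hypothetical:

info = get_metadata("/path/to/image.ome.tif")  # uses the default imageID=0
for key, value in info.items():
    print(key + " = " + str(value))
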
Example #10
def get_metadata(params):
    """get image metadata, either from the image file or from acquisition-time metadata"""
    if params.metadata_source == "Image metadata":
        try:
            reader = ImageReader()
            ome_meta = MetadataTools.createOMEXMLMetadata()
            reader.setMetadataStore(ome_meta)
            reader.setId(params.input_image_path)
            reader.close()
            params.setFrameInterval(
                ome_meta.getPixelsTimeIncrement(0).value())
            params.setIntervalUnit(
                ome_meta.getPixelsTimeIncrement(0).unit().getSymbol())
            params.setPixelPhysicalSize(
                ome_meta.getPixelsPhysicalSizeX(0).value())
            params.setPixelSizeUnit(
                ome_meta.getPixelsPhysicalSizeX(0).unit().getSymbol())
            params.setMetadataSourceFile(None)
        except Exception as e:
            print(e.message)
            mbui.warning_dialog([
                "There was a problem getting metadata from the image: ",
                e.message,
                "Please consider using acquisition metadata instead (click OK). ",
                "Or, quit the analysis run and investigate image metadata by hand. "
            ])
            params.setMetadataSource("Acquisition metadata")
    if params.metadata_source == "Acquisition metadata":
        od = OpenDialog('Choose acquisition metadata file...',
                        os.path.dirname(params.input_image_path), '*.txt')
        file_path = od.getPath()
        if file_path is None:
            raise IOError('no metadata file chosen')
        acq_metadata_dict = import_iq3_metadata(file_path)
        try:
            params.setFrameInterval(acq_metadata_dict['frame_interval'])
        except KeyError:
            params.setFrameInterval(1.0)
        try:
            params.setIntervalUnit(acq_metadata_dict['time_unit'])
        except KeyError:
            params.setIntervalUnit('frames')
        params.setPixelPhysicalSize(acq_metadata_dict['x_physical_size'])
        params.setPixelSizeUnit(acq_metadata_dict['x_unit'])
        params.setMetadataSourceFile(file_path)
    return params
Example #11
def load_ome_img(file_name):
    """

    :param file_name:
    :return:
    """
    imps = BF.openImagePlus(file_name)
    imag = imps[0]
    # parse metadata
    reader = ImageReader()
    omeMeta = MetadataTools.createOMEXMLMetadata()
    reader.setMetadataStore(omeMeta)
    reader.setId(file_name)
    print(omeMeta)
    reader.close()

    return (imag, omeMeta)
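
A hedged usage sketch for load_ome_img (hypothetical path):

imp, meta = load_ome_img("/path/to/image.ome.tif")  # hypothetical path
imp.show()
print("Image count in OME metadata: " + str(meta.getImageCount()))
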
Example #12
    def select_input(self, event):
        # get the info about the number of images in the file
        self.input_path = IJ.getFilePath("Choose a File")
        # if default naming is not changed use file name
        if self.textfield2.text == self.default_naming:
            self.file_core_name = path.basename(self.input_path).split('.czi')[0]
        else:
            self.file_core_name = self.textfield2.text
        # put that name in the text field
        self.panel.getComponents()[1].setText(self.file_core_name)

        reader = ImageReader()
        reader.setId(self.input_path)
        metadata_list = reader.getCoreMetadataList()
        # slide scanner makes a pyramid of X for every ROI you draw
        # resolution is not updated in the metadata so it needs to be calculated manually
        number_of_images, self.num_of_piramids_list = get_data_structure(metadata_list)
        print("Number of images is " + str(number_of_images))
        # get the indexes of the maximum resolution images
        self.max_res_indexes = get_maxres_indexes(self.num_of_piramids_list)
        print("Number of pyramids are " + str(self.num_of_piramids_list))
        # set names of subimages in the list, waiting to compare to current outputs
        self.possible_slices = [self.file_core_name + "_slice-" + str(n)
                                for n in range(number_of_images)]

        self.binFactor_list, self.binStep_list = get_binning_factor(self.max_res_indexes,
                                                self.num_of_piramids_list, metadata_list)
        print("Binning factors are " + str(self.binFactor_list))
        print("Binning steps are " + str(self.binStep_list))

        # create output directory if it doesn't exist
        # get the animal id
        animal_id = self.file_core_name.split('_')[0]
        self.output_path = path.join(path.dirname(path.dirname(self.input_path)),
                                     "Processed_data", animal_id, "ROIs")
        if path.isdir(self.output_path):
            print("Output path was already created")
        else:
            makedirs(self.output_path)
            print("Output path created")

        # update_lists depending on whether something has been processed already
        self.update_list()
Example #13
def time_parser():
    """ Iterates through timelapse,                           """
    """ outputs timepoints with corresponding seriesnames.    """
    """ - S. Grødem 2017                                      """
    
    # Get metadata.
    reader = ImageReader()
    omeMeta = MetadataTools.createOMEXMLMetadata()
    reader.setMetadataStore(omeMeta)
    reader.setId(str(Experiment))

    # Extracts number of image series
    seriesCount = reader.getSeriesCount()
    reader.close()

    # Gets timepoints, in minutes.
    timelist = []
    namelist = []
    
    for timepoint in range (seriesCount):
        times = omeMeta.getImageAcquisitionDate(timepoint)
        timelist.append(times.toString())
        namelist.append(omeMeta.getImageName(timepoint))

    # YY.MM... to minutes.
    timelist =[ time.mktime(time.strptime(times, u'%Y-%m-%dT%H:%M:%S')) for times in timelist ]
    timelist_unsorted =[ (times - timelist[0])/60 for times in timelist ]

    # Sort timepoints.
    timelist, namelist = zip(*sorted(zip(timelist_unsorted, namelist)))
    timelist = [round(float(i), 3) for i in timelist]

    # Output to IJ log
    images = zip(timelist, namelist)
    IJ.log("Series number: " + str(seriesCount))
    IJ.log("*"*15)
    
    for i in range(len(images)):
        IJ.log("Name: " + str(images[i][1]))
        IJ.log("Time: " + str(images[i][0]))
        IJ.log("-"*15)
def choose_series(filepath, params):
	"""if input file contains more than one image series (xy position), prompt user to choose which one to use"""
	# todo: if necessary (e.g. if lots of series), can improve thumbnail visuals based loosely on https://github.com/ome/bio-formats-imagej/blob/master/src/main/java/loci/plugins/in/SeriesDialog.java
	import_opts = ImporterOptions();
	import_opts.setId(filepath);
	
	reader = ImageReader();
	ome_meta = MetadataTools.createOMEXMLMetadata();
	reader.setMetadataStore(ome_meta);
	reader.setId(filepath);
	no_series = reader.getSeriesCount();
	if no_series == 1:
		return import_opts, params;
	else:
		series_names = [ome_meta.getImageName(idx) for idx in range(no_series)];
		dialog = GenericDialog("Select series to load...");
		dialog.addMessage("There are multiple series in this file! \n" + 
						"This is probably because there are multiple XY stage positions. \n " + 
						"Please choose which series to load: ");
		thumbreader = BufferedImageReader(reader);
		cbg = CheckboxGroup();
		for idx in range(no_series):
			p = Panel();
			p.add(Box.createRigidArea(Dimension(thumbreader.getThumbSizeX(), thumbreader.getThumbSizeY())));
			ThumbLoader.loadThumb(thumbreader, idx, p, True);
			dialog.addPanel(p);
			cb = Checkbox(series_names[idx], cbg, idx==0);
			p.add(cb);

		dialog.showDialog();
		if dialog.wasCanceled():
			raise KeyboardInterrupt("Run canceled");
		if dialog.wasOKed():
			selected_item = cbg.getSelectedCheckbox().getLabel();
			selected_index = series_names.index(selected_item);
			params.setSelectedSeriesIndex(selected_index);
			for idx in range(0, no_series):
				import_opts.setSeriesOn(idx, True) if (idx==selected_index) else import_opts.setSeriesOn(idx, False);
	reader.close();
	return import_opts, params
	basic_info = parse_tile_info_file(parentLSMFilePath+"_tiles/tile_info.txt")
	make_destination_directories(parentLSMFilePath+"_tiles/v_img/")
	tileConfigFilePath = parentLSMFilePath + "_tiles/resized/TileConfiguration.registered.txt"
	scale_info = estimate_scale_multiplier(parentLSMFilePath+"_tiles/tile_1.ome.tif",parentLSMFilePath+"_tiles/resized/tile_1.tif")
	print scale_info
	coords_list = read_tileconfig_file(tileConfigFilePath)
	coords_normed = normalize_coords_in_list(coords_list)
	coords_upscaled = round_coords(upscale_coords(coords_normed,scale_info[0]))
	write_tileconfig_file(parentLSMFilePath+"_tiles/v_img/TileConfiguration.fullsize.txt",coords_upscaled,".ome.tif")
	max_coords = get_max_coordinates(coords_upscaled)
	print max_coords
	print basic_info

	## Outputs each stitched z plane as a separate file
	iReader = ImageReader()
	iReader.setId(parentLSMFilePath)
	for z in range(max_coords[2]+basic_info[4]):
	## for z in range(50,51):
		IJ.showStatus("z: "+str(z+1)+" of "+str(max_coords[2]+basic_info[4]))
		chIps = []
		resImages = []
		for ch in range(basic_info[0]):
			chIps.append(ByteProcessor(max_coords[0]+scale_info[2],max_coords[1]+scale_info[2]))
		for ch in range(basic_info[0]):
			resImages.append(ImagePlus("ch"+str(ch+1),chIps[ch]))
		for se in range(basic_info[1]):
			IJ.showProgress(se,basic_info[1])
			if z >= coords_upscaled[se][2] and z <= coords_upscaled[se][2]+basic_info[4]-1:
				iReader.setSeries(se)
				for ch in range(basic_info[0]):
					byteArray = iReader.openBytes((z-coords_upscaled[se][2])*basic_info[0]+ch)
Example #16
    def openfile(imagefile,
                 stitchtiles=True,
                 setflatres=False,
                 readpylevel=0,
                 setconcat=True,
                 openallseries=True,
                 showomexml=False,
                 attach=False,
                 autoscale=True,
                 imageID=0):

        # stitchtiles = option of CZIReader to return the raw tiles as
        # individual series rather than the auto-stitched images

        metainfo = {}
        # check the file extension
        metainfo['Extension'] = MiscTools.getextension(MiscTools.splitext_recurse(imagefile))

        # initialize the reader and get the OME metadata
        reader = ImageReader()
        omeMeta = MetadataTools.createOMEXMLMetadata()
        metainfo['ImageCount_OME'] = omeMeta.getImageCount()
        reader.setMetadataStore(omeMeta)
        reader.setId(imagefile)
        metainfo['SeriesCount_BF'] = reader.getSeriesCount()
        reader.close()

        # read dimensions TZCXY from OME metadata
        metainfo['SizeT'] = omeMeta.getPixelsSizeT(imageID).getValue()
        metainfo['SizeZ'] = omeMeta.getPixelsSizeZ(imageID).getValue()
        metainfo['SizeC'] = omeMeta.getPixelsSizeC(imageID).getValue()
        metainfo['SizeX'] = omeMeta.getPixelsSizeX(imageID).getValue()
        metainfo['SizeY'] = omeMeta.getPixelsSizeY(imageID).getValue()

        # store info about stack
        if metainfo['SizeZ'] == 1:
            metainfo['is3d'] = False
        elif metainfo['SizeZ'] > 1:
            metainfo['is3d'] = True

        # get the scaling for XYZ
        physSizeX = omeMeta.getPixelsPhysicalSizeX(0)
        physSizeY = omeMeta.getPixelsPhysicalSizeY(0)
        physSizeZ = omeMeta.getPixelsPhysicalSizeZ(0)

        if physSizeX is not None:
            metainfo['ScaleX'] = round(physSizeX.value(), 3)
            metainfo['ScaleY'] = round(physSizeY.value(), 3)
        if physSizeX is None:
            metainfo['ScaleX'] = None
            metainfo['ScaleY'] = None

        if physSizeZ is not None:
            metainfo['ScaleZ'] = round(physSizeZ.value(), 3)
        if physSizeZ is None:
            metainfo['ScaleZ'] = None

        # if image file is Carl Zeiss Image - CZI
        if metainfo['Extension'] == '.czi':

            # read the CZI file using the CZIReader
            # pylevel = 0 - read the full resolution image

            imp, metainfo = ImportTools.readCZI(imagefile, metainfo,
                                                stitchtiles=stitchtiles,
                                                setflatres=setflatres,
                                                readpylevel=readpylevel,
                                                setconcat=setconcat,
                                                openallseries=openallseries,
                                                showomexml=showomexml,
                                                attach=attach,
                                                autoscale=autoscale)

        # if image file is not Carl Zeiss Image - CZI
        if metainfo['Extension'] != '.czi':

            # read the imagefile using the correct method
            if metainfo['Extension'].lower() in ('.jpg', '.jpeg'):
                # use dedicated method for jpg
                imp, metainfo = ImageTools.openjpg(imagefile, method='IJ')
            else:
                # if not jpg - use BioFormats
                imp, metainfo = ImportTools.readbf(imagefile, metainfo,
                                                   setflatres=setflatres,
                                                   readpylevel=readpylevel,
                                                   setconcat=setconcat,
                                                   openallseries=openallseries,
                                                   showomexml=showomexml,
                                                   autoscale=autoscale)

        return imp, metainfo
Example #17
def process(filename):
	TEMPLATE_GENERAL 		= "The data was acquired on a {ID} microscope, using a {objective} {NA} NA objective. The pixel size was {pxx_microns} microns. "
	TEMPLATE_CHANNEL		= "The excitation and emission wavelengths for channel {ch} were {ex} and {em} and the {exposureTime} was {et}. "
	TEMPLATE_3D				= "A series of slices was collected with a step size of {pzz_microns} microns. "
	TEMPLATE_TIME			= "Images were acquired with a time interval of {timeInterval}. "
	
	BLURB = ""
	
	# Admin stuff
	import sys
	
	from org.scijava.ui.swing.console import LoggingPanel
	logger.addLogListener( LoggingPanel(context) );
	
	logger.info(filename.getAbsolutePath())
	
	# Get a BioFormats reader
	from loci.formats import ImageReader
	ir = ImageReader()
	
	# Adapted from https://github.com/ome/bioformats/blob/develop/components/formats-gpl/utils/GetPhysicalMetadata.java
	m = omeservice.createOMEXMLMetadata()
	
	ir.setMetadataStore(m)
	ir.setId(filename.getAbsolutePath())
	
	# Some checks
	ninstruments 	= m.getInstrumentCount()
	if ninstruments > 1:
		logger.error("More than one instrument found. Automatic generation will not work...")
	if ninstruments == 0:
		logger.error("No instrument metadata found! Automatic generation will not work...")
	
	# Manufacturer and modalities
	try:
		ID = m.getMicroscopeManufacturer(0)
	except:
		logger.error(sys.exc_info()[0])
		ID = None
		
	if ID == None:
		ff = str(ir.getFormat())
		if "Zeiss" in ff:
			ID="Zeiss"
		elif "Nikon" in ff:
			ID="Nikon"
	
			tID = ir.getMetadataValue("m_sMicroscopePhysFullName")
			if tID is not None:
				ID = tID
				
		elif "Olympus" in ff:
			ID="Olympus"
		else:
			ID=""
	
	for ic in range(ir.getSizeC()):
		mode = m.getChannelAcquisitionMode(0,ic)
	
		if ic>0 and mode != mode0:
			logger.warn("WARNING : Not all channels were acquired with the same modality..")
		else:
			mode0=mode
	
	if mode == None:
		mode_with_spaces = "UNKNOWN"
	else:
		mode_with_spaces = ""
		if str(mode) == "TIRF":
			mode_with_spaces = str(mode)
		else:
			for letter in str(mode):
				if letter.isupper():
					mode_with_spaces += " "+letter.lower()
				else:
					mode_with_spaces += letter
	
	ID+=" "+str(mode_with_spaces.strip())
	
	if ninstruments == 1:
		nobjectives		= m.getObjectiveCount(0)
		if nobjectives > 1:
			logger.error("More than one objective found. Automatic generation will generate information for the first objective only.")
	
	objective = "UNKNOWN"
	if ninstruments == 1 and nobjectives >0:
		try:
			magnification1 	= m.getObjectiveNominalMagnification(0,0)
	
			if magnification1 != None:
				objective = "{:.0f}x".format(magnification1)
		except:
			logger.error(sys.exc_info()[0])
			msg = "Could not extract information about the objective! The image might be missing some crucial metadata."
			logger.error(msg)
	
	if objective == "UNKNOWN":
		if "Nikon" in ff:
			objective0 = str(ir.getMetadataValue("sObjective"))
			if objective0 is not None:
				objective = objective0
			
	
	NA = "UNKNOWN"
	if ninstruments == 1 and nobjectives >0:
		try:
			NA1 = m.getObjectiveLensNA(0,0)
			
			if NA1 != None:
				NA = str(NA1)
		except:
			msg = "Could not extract information about the objective! The image might be missing some crucial metadata."
			logger.error(msg)
				
	NAm = ir.getMetadataValue("Numerical Aperture")
	if NA=="UNKNOWN" and "Nikon" in ff and NAm is not None:
		NA = str(NAm)
	#else:
	#	HT=ir.getGlobalMetadata()
	#	for k in HT.keys():
	#		print "{}={}".format(k,HT.get(k))
	
	# Pixel size
	nimages = m.getImageCount()
	logger.info("Found {} images".format(nimages))
	
	from ome.units import UNITS
	
	pxx_microns = "UNKNOWN"
	if ninstruments==1 and nobjectives>0:
		try:
			pxx_microns = "{:.2f}".format(m.getPixelsPhysicalSizeX(0).value(UNITS.MICROMETER))
		except:
			logger.error(sys.exc_info()[0])
			msg = "Could not extract physical pixel size! The image might be missing some crucial metadata."
			logger.error(msg)
	
	# Is it 3D?
	is3D = ir.getSizeZ()>1
	
	pzz_microns = "UNKNOWN"
	
	if ninstruments==1 and nobjectives>0:
		try:
			pzz_microns = "{:.2f}".format(m.getPixelsPhysicalSizeZ(0).value(UNITS.MICROMETER))
		except:
			logger.error(sys.exc_info()[0])
			msg = "This image is 3D but I could not extract physical step size! The image might be missing some crucial metadata."
			logger.error(msg)
	
	
	
	# TODO Is it a time series?
	
	# GENERAL BLURB GENERATION
	BLURB += TEMPLATE_GENERAL.format(ID=ID, objective=objective, NA=NA, pxx_microns=pxx_microns)
	if is3D:
		BLURB += TEMPLATE_3D.format(pzz_microns=pzz_microns)
	
	# Extract channel information
	for ic in range(ir.getSizeC()):
		try:
			ex0 = m.getChannelExcitationWavelength(0,ic)
			
			if ex0==None:
				ex = "UNKNOWN"
			else:
				ex="{:.0f} nm".format(ex0.value(UNITS.NANOMETER))
		except:
			logger.error(sys.exc_info()[0])
			logger.error("Wasn't able to extract channel wavelength information for channel {}.".format(ic+1))
			continue
	
		try:
			em0 = m.getChannelEmissionWavelength(0,ic)
			
			if em0==None:
				em = "UNKNOWN"
			else:
				em="{:.0f} nm".format(em0.value(UNITS.NANOMETER))
		except:
			logger.error(sys.exc_info()[0])
			logger.error("Wasn't able to extract channel wavelength information for channel {}.".format(ic+1))
			continue
	
		#try:
		ix = ir.getIndex(0, ic, 0)		# NOTE : First z plane, first timepoint only
		et = m.getPlaneExposureTime(0,ix)
	
		if et==None:
			et = "UNKNOWN"
		else:
			etms = et.value(UNITS.MILLISECOND)

			if "CZI" in ff: # TODO Check if error is across other images
				logger.warn("The exposure time was divided by 1000 to account for ms mistaken as s in CZI files")
				
				etms = etms/1000
				
			if etms<1000:
				et=str("{:.2f} ms".format(etms))
			else:
				et=str("{} s".format(etms/1000))
	
				if etms/1000>600:
					logger.warn("Exposure time for channel {} is {}s. That's longer than 10m, please double check metadata to make sure it's correct".format(ic+1,etms/1000))
	
		BLURB += TEMPLATE_CHANNEL.format(ch=ic+1, ex=ex, exposureTime="exposure time", et=et, em=em)		
		#except:
		#	logger.error("Wasn't able to extract channel {} exposure time information.".format(ic+1))

	return BLURB
Example #18
from net.imglib2.type.numeric.integer import UnsignedByteType
import net.imglib2.type.logic.BitType
import net.imglib2.algorithm.neighborhood.HyperSphereShape
from net.imglib2.type.numeric.real import FloatType,DoubleType
from ij.measure import ResultsTable
from net.imagej.ops import Ops
from loci.plugins.in import ImporterOptions
options = ImporterOptions()
options.setId(Input_File.getAbsolutePath())
from loci.formats import ImageReader
from loci.formats import MetadataTools
#get import ready and import
reader = ImageReader()
omeMeta = MetadataTools.createOMEXMLMetadata()
reader.setMetadataStore(omeMeta)
reader.setId(Input_File.getAbsolutePath())
seriesCount = reader.getSeriesCount()
reader.close()
#open image
imp, = BF.openImagePlus(options)
#get output path variable
outdir=Output_File.getAbsolutePath()
#get input path variable
inpu=Input_File.getAbsolutePath()
#convert to RGB
IC(imp).convertToRGB()
#show image
imp.show()
#Define ROI of whole image (basically)
imp.setRoi(1,1,478,479)
def processFile(filename, inDir, outDir, dichroics, mergeList):

	if mergeList is None:
		merge = False
	else:
		merge = True
	
	filenameExExt = os.path.splitext(filename)[0]
	filepath = inDir + filename
      
	# parse metadata
	reader = ImageReader()
	omeMeta = MetadataTools.createOMEXMLMetadata()
	reader.setMetadataStore(omeMeta)
	reader.setId(filepath)
	numChannels = reader.getSizeC()
	numSlices = reader.getSizeZ()
	numFrames = reader.getSizeT()
	seriesCount = reader.getSeriesCount()

	globalMetadata = reader.getGlobalMetadata()
	seriesMetadata = reader.getSeriesMetadata()

	objLensName = globalMetadata['- Objective Lens name #1']

	areaRotation = float(seriesMetadata['area rotation #1'])
	acquisitionValueRotation = float(seriesMetadata['acquisitionValue rotation #1'])
	if 'regionInfo rotation #1' in seriesMetadata:
		regionInfoRotation = float(seriesMetadata['regionInfo rotation #1'])
	else:
		regionInfoRotation = float(0)

	totalRotation = areaRotation + regionInfoRotation
	physSizeX = omeMeta.getPixelsPhysicalSizeX(0)
	physSizeY = omeMeta.getPixelsPhysicalSizeY(0)
	pxSizeX = physSizeX.value(UNITS.MICROM)
	pxSizeY = physSizeY.value(UNITS.MICROM)

	# log metadata
	IJ.log("\nMETADATA")
	#IJ.log("Filename: " + filepath)
	IJ.log("Number of series: " + str(seriesCount))
	IJ.log("Number of channels: " + str(numChannels))
	IJ.log("Number of frames: " + str(numFrames))
	IJ.log("Number of slices: " + str(numSlices))
	IJ.log("Objective lens: " + objLensName)
	IJ.log("FOV rotation: " + str(areaRotation))
	IJ.log("ROI rotation: " + str(regionInfoRotation))
	IJ.log("Total rotation: " + str(totalRotation))
	IJ.log("Pixel size:")
	IJ.log("\t\tX = " + str(physSizeX.value()) + " " + physSizeX.unit().getSymbol())
	IJ.log("\t\tY = " + str(physSizeY.value()) + " " + physSizeY.unit().getSymbol())
  
	if merge:
		tifDir = outDir + "." + str(datetime.now()).replace(" ", "").replace(":", "") + "/"
		if not os.path.exists(tifDir):
			os.makedirs(tifDir)
			IJ.log("\nCreated temporary folder: " + tifDir + "\n")
		else:
			IJ.log("Unable to create temporary folder!\n")
	else:
		tifDir = outDir + filenameExExt + "/"
		if not os.path.exists(tifDir):
			os.makedirs(tifDir)
			IJ.log("\nCreated subfolder: " + tifDir + "\n")
		else:
			IJ.log("\nSubfolder " + tifDir +  " already exists.\n")

	# correct images
	tifFilePaths = []
	for i in range(numChannels):
		ip = extractChannel(oirFile=filepath, ch=i)
		if dichroics[i] == "DM1":
			IJ.log("Channel " + str(i+1) + " was imaged using DM1, so no correction required.")
		else:
			offsets = getOffset(obj=objLensName,dm=dichroicDict[dichroics[i]])
			xom = offsets['x']
			yom = offsets['y']
			if abs(totalRotation) > 0.1:
				rotOff = rotateOffset(x=xom, y=yom, angle=-totalRotation)
				xom = rotOff['x']
				yom = rotOff['y']
			xop = int(round(xom/pxSizeX))
			yop = int(round(yom/pxSizeY))
			IJ.log("Channel " + str(i+1) + " offsets")
			IJ.log("\t\tMicrometres")
			IJ.log("\t\t\t\tx = " + str(xom))
			IJ.log("\t\t\t\ty = " + str(yom))
			IJ.log("\t\tPixels")
			IJ.log("\t\t\t\tx = " + str(xop))
			IJ.log("\t\t\t\ty = " + str(yop))
			IJ.run(ip, "Translate...", "x=" + str(-xop) + " y=" + str(-yop) + " interpolation=None stack")

		tifFilePath = tifDir + filenameExExt + "_ch_"+str(i+1)+".tif"
		tifFilePaths.append(tifFilePath)
		if os.path.exists(tifFilePath):
			IJ.log("\nOutput file exists: " + tifFilePath)
			IJ.log("Rerun plugin choosing a different output folder")
			IJ.log("or delete file and then rerun plugin.")
			IJ.log("Image processing terminated!\n")
			return
		FileSaver(ip).saveAsTiff(tifFilePath)

	if merge:
		max_list = []
		for i in range(len(mergeList)):
			if mergeList[i] != None:
				mergeList[i] = readSingleChannelImg(tifFilePaths[mergeList[i]])
				channel = mergeList[i]#https://python.hotexamples.com/examples/ij.plugin/RGBStackMerge/mergeChannels/python-rgbstackmerge-mergechannels-method-examples.html
				projector = ZProjector(channel)
				projector.setMethod(ZProjector.MAX_METHOD)
				projector.doProjection()
				max_list.append(projector.getProjection())
		merged = RGBStackMerge.mergeChannels(mergeList, False)
		merged_max = RGBStackMerge.mergeChannels(max_list, False)
		mergedChannelFilepath = outDir + filenameExExt + ".tif"
		maxMergedChannelFilepath = outDir + filenameExExt + "_max.tif"
		if os.path.exists(mergedChannelFilepath):
			IJ.log("\nOutput file exists: " + mergedChannelFilepath)
			IJ.log("Rerun plugin choosing a different output folder")
			IJ.log("or delete file and then rerun plugin.")
			IJ.log("Image processing terminated!\n")
		FileSaver(merged).saveAsTiff(mergedChannelFilepath)
		FileSaver(merged_max).saveAsTiff(maxMergedChannelFilepath)
		for tf in tifFilePaths:
			os.remove(tf)
		os.rmdir(tifDir)	

	IJ.log("\nFinished processing file:\n" + filepath + "\n")
	if merge:
		IJ.log("Image file with channels aligned:\n" + outDir + filenameExExt + ".tif\n")
	else:
		IJ.log("Aligned images (one tiff file for each channel) can be found in:\n" + tifDir + "\n")
Example #20
def processMovie(root, files, outfile):
    """Concatenate images and write ome.tiff file.
    If image contains already multiple time points just copy the image"""

    files.sort()

    options = ImporterOptions()
    options.setId(files[0])
    options.setVirtual(1)

    image = BF.openImagePlus(options)
    image = image[0]
    if image.getNFrames() > 1:
        msg = ("%s Contains multiple time points. Can only concatenate"
               " single time points!" % files[0])
        image.close()
        raise RuntimeError(msg)

    reader = ImageReader()
    reader.setMetadataStore(MetadataTools.createOMEXMLMetadata())
    reader.setId(files[0])
    timeInfo = []
    omeOut = reader.getMetadataStore()
    omeOut = setUpXml(omeOut, image, files)
    reader.close()
    image.close()
    itime = 0

    for fileName in files:
        omeMeta = MetadataTools.createOMEXMLMetadata()
        reader.setMetadataStore(omeMeta)
        reader.setId(fileName)
        timeInfo.append(getTimePoint(reader, omeMeta))

        nrImages = reader.getImageCount()
        for i in range(0, reader.getImageCount()):
            try:
                dT = round(timeInfo[files.index(fileName)]-timeInfo[0],2)
            except:
                dT = (timeInfo[files.index(fileName)]-timeInfo[0]).seconds
            omeOut.setPlaneDeltaT(dT, 0, i + itime*nrImages)
            omeOut.setPlanePositionX(omeOut.getPlanePositionX(0,i), 0, i + itime*nrImages)
            omeOut.setPlanePositionY(omeOut.getPlanePositionY(0,i), 0, i + itime*nrImages)
            omeOut.setPlanePositionZ(omeOut.getPlanePositionZ(0,i), 0, i + itime*nrImages)
            omeOut.setPlaneTheC(omeOut.getPlaneTheC(0,i), 0, i + itime*nrImages)
            omeOut.setPlaneTheT(omeOut.getPlaneTheT(0,i), 0, i + itime*nrImages)
            omeOut.setPlaneTheZ(omeOut.getPlaneTheZ(0,i), 0, i + itime*nrImages)
        itime = itime + 1
        reader.close()

        IJ.showProgress(files.index(fileName), len(files))

    try:
        omeOut.setPixelsTimeIncrement(float(dT/(len(files)-1)), 0)
    except:
        omeOut.setPixelsTimeIncrement(0, 0)

    if len(files) <= 1:
        raise RuntimeError('Found only one file. Nothing to concatenate')

    outfile = concatenateImagePlus(files, outfile)
    filein = RandomAccessInputStream(outfile)
    fileout = RandomAccessOutputStream(outfile)
    saver = TiffSaver(fileout, outfile)
    saver.overwriteComment(filein, omeOut.dumpXML())
    fileout.close()
    filein.close()
sys.path.append(path.abspath(path.dirname(__file__)))
from functions.czi_structure import get_data_structure, get_binning_factor, open_czi_series, \
    get_maxres_indexes
from functions.image_manipulation import extractChannel

piramid_to_open = 1
channel_to_save = 3
final_resolution = 5

# Main
if __name__ in ['__builtin__', '__main__']:
    # get the file
    input_path = IJ.getFilePath("Choose a .czi file")
    reader = ImageReader()
    reader.setId(input_path)
    metadata_list = reader.getCoreMetadataList()
    # slide scanner makes a pyramid of X for every ROI you draw
    # resolution is not updated in the metadata so it needs to be calculated manually
    number_of_images, num_of_piramids_list = get_data_structure(metadata_list)
    IJ.log("Number of images is " + str(number_of_images))
    # set names of subimages in the list, waiting to compare to current outputs
    file_core_name = path.basename(input_path).split('.czi')[0]
    # get the indexes of the maximum resolution images
    max_res_indexes = get_maxres_indexes(num_of_piramids_list)
    IJ.log("Number of pyramids are " + str(num_of_piramids_list))
    # set names of subimages in the list, waiting to compare to current outputs
    possible_slices = [
        file_core_name + "_slice-" + str(n) for n in range(number_of_images)
    ]
def nucleus_detection(infile, nucleus_channel, stacksize, animation):
    # Detect nuclei with 3D LoG (Laplacian of Gaussian) filters
    fullpath = infile
    infile = filename(infile)
    IJ.log("Start Segmentation " + str(infile))
    # First get Nb Stacks
    reader = ImageReader()
    omeMeta = MetadataTools.createOMEXMLMetadata()
    reader.setMetadataStore(omeMeta)
    reader.setId(fullpath)
    default_options = "stack_order=XYCZT color_mode=Composite view=Hyperstack specify_range c_begin=" + \
        str(nucleus_channel) + " c_end=" + str(nucleus_channel) + \
        " c_step=1 open=[" + fullpath + "]"
    NbStack = reader.getSizeZ()
    reader.close()
    output = re.sub('.ids', '.csv', infile)
    with open(os.path.join(folder5, output), 'wb') as outfile:
        DETECTwriter = csv.writer(outfile, delimiter=',')
        DETECTwriter.writerow(
            ['spotID', 'roundID', 'X', 'Y', 'Z', 'QUALITY', 'SNR', 'INTENSITY'])
    rounds = NbStack // stacksize
    spotID = 1
    for roundid in xrange(1, rounds + 2):
        # Process stacksize by stacksize otherwise crash because too many spots
        Zstart = (stacksize * roundid - stacksize + 1)
        Zend = (stacksize * roundid)
        if(Zend > NbStack):
            Zend = NbStack % stacksize + (roundid - 1) * stacksize
        IJ.log("Round:" + str(roundid) + ' Zstart=' + str(Zstart) +
               ' Zend=' + str(Zend) + ' out of ' + str(NbStack))
        IJ.run("Bio-Formats Importer", default_options + " z_begin=" +
               str(Zstart) + " z_end=" + str(Zend) + " z_step=1")
        imp = IJ.getImage()
        imp.show()
        cal = imp.getCalibration()
        model = Model()
        settings = Settings()
        settings.setFrom(imp)
        # Configure detector - Manually determined as best
        settings.detectorFactory = LogDetectorFactory()
        settings.detectorSettings = {
            'DO_SUBPIXEL_LOCALIZATION': True,
            'RADIUS': 5.5,
            'TARGET_CHANNEL': 1,
            'THRESHOLD': 50.0,
            'DO_MEDIAN_FILTERING': False,
        }
        filter1 = FeatureFilter('QUALITY', 1, True)
        settings.addSpotFilter(filter1)
        settings.addSpotAnalyzerFactory(SpotIntensityAnalyzerFactory())
        settings.addSpotAnalyzerFactory(SpotContrastAndSNRAnalyzerFactory())
        settings.trackerFactory = SparseLAPTrackerFactory()
        settings.trackerSettings = LAPUtils.getDefaultLAPSettingsMap()

        trackmate = TrackMate(model, settings)
        ok = trackmate.checkInput()
        if not ok:
            sys.exit(str(trackmate.getErrorMessage()))
        try:
            ok = trackmate.process()
        except:
            IJ.log("Nothing detected, Round:" + str(roundid) + ' Zstart=' +
                   str(Zstart) + ' Zend=' + str(Zend) + ' out of ' + str(NbStack))
            IJ.selectWindow(infile)
            IJ.run('Close')
            continue
        else:
            if animation:
                # For plotting purpose only
                imp.setPosition(1, 1, imp.getNFrames())
                imp.getProcessor().setMinAndMax(0, 4000)
                selectionModel = SelectionModel(model)
                displayer = HyperStackDisplayer(model, selectionModel, imp)
                displayer.render()
                displayer.refresh()
                for i in xrange(1, imp.getNSlices() + 1):
                    imp.setSlice(i)
                    time.sleep(0.05)
            IJ.selectWindow(infile)
            IJ.run('Close')
            spots = model.getSpots()
            spotIt = spots.iterator(0, False)
            sid = []
            sroundid = []
            x = []
            y = []
            z = []
            q = []
            snr = []
            intensity = []
            for spot in spotIt:
                sid.append(spotID)
                spotID = spotID + 1
                sroundid.append(roundid)
                x.append(spot.getFeature('POSITION_X'))
                y.append(spot.getFeature('POSITION_Y'))
                q.append(spot.getFeature('QUALITY'))
                snr.append(spot.getFeature('SNR'))
                intensity.append(spot.getFeature('MEAN_INTENSITY'))
                # Correct Z position
                correct_z = spot.getFeature(
                    'POSITION_Z') + (roundid - 1) * float(stacksize) * cal.pixelDepth
                z.append(correct_z)
            with open(os.path.join(folder5, output), 'ab') as outfile:
                DETECTwriter = csv.writer(outfile, delimiter=',')
                Sdata = zip(sid, sroundid, x, y, z, q, snr, intensity)
                for Srow in Sdata:
                    DETECTwriter.writerow(Srow)
def get_ome_metadata(source, imagenames):
    """Get the stage coordinates and calibration from the ome-xml for a given list of images

    Arguments:
        source {string} -- Path to the images
        imagenames {list} -- list of images filenames

    Returns:
        a tuple that contains:
        dimensions {int} -- number of dimensions (2D or 3D)
        stage_coordinates_x {list} -- the absolute stage x-coordinates from ome-xml metadata
        stage_coordinates_y {list} -- the absolute stage y-coordinates from ome-xml metadata
        stage_coordinates_z {list} -- the absolute stage z-coordinates from ome-xml metadata
        relative_coordinates_x_px {list} -- the relative stage x-coordinates in px
        relative_coordinates_y_px {list} -- the relative stage y-coordinates in px
        relative_coordinates_z_px {list} -- the relative stage z-coordinates in px
        image_calibration {list} -- x,y,z image calibration in unit/px
        calibration_unit {string} -- image calibration unit
        image_dimensions_czt {list} -- number of images in dimensions c,z,t
    """

    # lists to store the absolute stage coordinates from metadata
    stage_coordinates_x = []
    stage_coordinates_y = []
    stage_coordinates_z = []

    for counter, image in enumerate(imagenames):

        # parse metadata
        reader = ImageReader()
        omeMeta = MetadataTools.createOMEXMLMetadata()
        reader.setMetadataStore(omeMeta)
        reader.setId(source + str(image))

        # get hyperstack dimensions from the first image
        if counter == 0:
            frame_size_x = reader.getSizeX()
            frame_size_y = reader.getSizeY()
            frame_size_z = reader.getSizeZ()
            frame_size_c = reader.getSizeC()
            frame_size_t = reader.getSizeT()

            # note the dimensions
            if frame_size_z == 1:
                dimensions = 2
            if frame_size_z > 1:
                dimensions = 3

            # get the physical calibration for the first image series
            physSizeX = omeMeta.getPixelsPhysicalSizeX(0)
            physSizeY = omeMeta.getPixelsPhysicalSizeY(0)
            physSizeZ = omeMeta.getPixelsPhysicalSizeZ(0)

            # workaround to get the z-interval if physSizeZ.value() returns None.
            z_interval = 1
            if physSizeZ is not None:
                z_interval = physSizeZ.value()

            if frame_size_z > 1 and physSizeZ is None:
                print "no z calibration found, trying to recover"
                first_plane = omeMeta.getPlanePositionZ(0, 0)
                next_plane_imagenumber = frame_size_c + frame_size_t - 1
                second_plane = omeMeta.getPlanePositionZ(
                    0, next_plane_imagenumber)
                z_interval = abs(
                    abs(first_plane.value()) - abs(second_plane.value()))
                print "z-interval seems to be: ", z_interval

            # create an image calibration
            image_calibration = [
                physSizeX.value(),
                physSizeY.value(), z_interval
            ]
            calibration_unit = physSizeX.unit().getSymbol()
            image_dimensions_czt = [frame_size_c, frame_size_z, frame_size_t]

        reader.close()

        # get the plane position in calibrated units
        current_position_x = omeMeta.getPlanePositionX(0, 0)
        current_position_y = omeMeta.getPlanePositionY(0, 0)
        current_position_z = omeMeta.getPlanePositionZ(0, 0)

        # get the absolute stage positions and store them
        pos_x = current_position_x.value()
        pos_y = current_position_y.value()

        if current_position_z is None:
            print "the z-position is missing in the ome-xml metadata."
            pos_z = 1.0
        else:
            pos_z = current_position_z.value()

        stage_coordinates_x.append(pos_x)
        stage_coordinates_y.append(pos_y)
        stage_coordinates_z.append(pos_z)

    # calculate and store the relative stage movements in px (for the grid/collection stitcher)
    relative_coordinates_x_px = []
    relative_coordinates_y_px = []
    relative_coordinates_z_px = []

    for i in range(len(stage_coordinates_x)):
        rel_pos_x = (stage_coordinates_x[i] -
                     stage_coordinates_x[0]) / physSizeX.value()
        rel_pos_y = (stage_coordinates_y[i] -
                     stage_coordinates_y[0]) / physSizeY.value()
        rel_pos_z = (stage_coordinates_z[i] -
                     stage_coordinates_z[0]) / z_interval

        relative_coordinates_x_px.append(rel_pos_x)
        relative_coordinates_y_px.append(rel_pos_y)
        relative_coordinates_z_px.append(rel_pos_z)

    return (dimensions, stage_coordinates_x, stage_coordinates_y,
            stage_coordinates_z, relative_coordinates_x_px,
            relative_coordinates_y_px, relative_coordinates_z_px,
            image_calibration, calibration_unit, image_dimensions_czt)
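
A hedged usage sketch showing how the returned tuple can be unpacked; the folder and file names are hypothetical, and the folder path must end with a separator because the function concatenates source and image name directly:

(dimensions, stage_x, stage_y, stage_z,
 rel_x_px, rel_y_px, rel_z_px,
 image_calibration, calibration_unit,
 image_dimensions_czt) = get_ome_metadata("/data/tiles/",
                                          ["tile_1.ome.tif", "tile_2.ome.tif"])
print("dimensions: " + str(dimensions))
print("calibration (x, y, z) in " + str(calibration_unit) + ": " + str(image_calibration))
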
		fs.saveAsTiff(new_name)

# Main script

new_dir = make_dir(folder)
# filter for your images, get the files with the accepted endings
listoffiles = [ str(f) for f in os.listdir(folder) if f.endswith(accepted_files)]
# ignore automatically generated hidden files that start with .
real_names = [f for f in listoffiles if not f.startswith(".")]
message = "\n\n >> Now converting the following files: \n"+ "\n --> ".join(real_names) 
IJ.log(message)

for i in real_names:
	image = os.path.join(folder, i)
	print(image)
	message = "\n >> Now processing: \n --> " +str(image)
	IJ.log(message)
	reader = ImageReader()
	# set image id
	reader.setId(image)
	# get the number of series
	series = reader.getSeriesCount()
	# iterate through series
	for s in range(series):
		imps = set_options(image, s)
		fixed_name = fix_name(imps)
		save_tif(imps, fixed_name, new_dir)

print("DONE!")
IJ.log("\n\n >> DONE!")
Example #25
def run():
    t_start = datetime.now()
    image_paths = glob(os.path.join(str(import_dir.getPath()), '*tif'))

    print '\tread image metadata'
    reader = ImageReader()
    in_meta = MetadataTools.createOMEXMLMetadata()
    reader.setMetadataStore(in_meta)

    x_dims = []
    y_dims = []
    z_dims = []
    c_dims = []
    t_dims = []
    eff = []
    spp = []

    for image_path in image_paths:
        print '\t  parse %s' % (image_path)
        reader.setId(image_path)
        x_dims.append(reader.getSizeX())
        y_dims.append(reader.getSizeY())
        z_dims.append(reader.getSizeZ())
        c_dims.append(reader.getSizeC())
        t_dims.append(reader.getSizeT())
        eff.append(reader.imageCount / z_dims[-1] / t_dims[-1])
        spp.append(reader.getSizeC() / eff[-1])

    format = FormatTools.getPixelTypeString(reader.getPixelType())
    series = reader.getSeries()
    big_endian = Boolean.FALSE
    order = reader.getDimensionOrder()
    reader.close()

    # Compute the dimensions of the output file
    x_dim = max(x_dims)
    y_dim = max(y_dims)
    z_dim = max(z_dims)
    c_dim = max(c_dims)
    t_dim = max(t_dims)

    print '\t  series: %i' % series
    print '\t  format: %s' % format
    print '\t  dimension order: %s' % order
    print '\t  x: %s -> %i' % (x_dims, x_dim)
    print '\t  y: %s -> %i' % (y_dims, y_dim)
    print '\t  z: %s -> %i' % (z_dims, z_dim)
    print '\t  c: %s -> %i' % (c_dims, c_dim)
    print '\t  t: %s -> %i' % (t_dims, t_dim)
    print '\t  effective size c: %s' % eff
    print '\t  samples per pixel: %s' % spp

    # Get the time dimension from the number of input files
    t_dim = len(image_paths)

    # TODO: Tried to work out the order with Axes class, got something weird though.
    dimensions = [Short(x_dim), Short(y_dim), Short(c_dim), Short(z_dim)]

    pixels_per_plane = x_dim * y_dim

    # Assemble the metadata for the output file
    out_meta = MetadataTools.createOMEXMLMetadata()
    out_meta.setImageID(MetadataTools.createLSID('Image', series), series)
    out_meta.setPixelsID(MetadataTools.createLSID('Pixels', series), series)
    out_meta.setPixelsBinDataBigEndian(Boolean.TRUE, 0, 0)
    out_meta.setPixelsDimensionOrder(DimensionOrder.fromString(order), series)
    out_meta.setPixelsType(PixelType.fromString(format), series)
    out_meta.setPixelsSizeX(PositiveInteger(x_dim), series)
    out_meta.setPixelsSizeY(PositiveInteger(y_dim), series)
    out_meta.setPixelsSizeZ(PositiveInteger(z_dim), series)
    out_meta.setPixelsSizeC(PositiveInteger(c_dim), series)
    out_meta.setPixelsSizeT(PositiveInteger(t_dim), series)

    for c in range(c_dim):
        out_meta.setChannelID(MetadataTools.createLSID('Channel', series, c),
                              series, c)
        out_meta.setChannelSamplesPerPixel(PositiveInteger(1), series, c)

    # Initialize the BF writer
    result_path = os.path.join(result_dir.getPath(), result_name)
    writer = ImageWriter()
    writer.setMetadataRetrieve(out_meta)
    writer.setId(result_path)
    print '\tcreated to %s' % (result_path)

    # Write the stacks into the output file
    N = len(image_paths)
    for i, image_path in enumerate(image_paths):
        status.showStatus(i, N, "catenating %i of %i time-points" % (i, N))
        print '\t  processing %s' % (image_path)
        ds = io.open(image_path)
        xi = ds.dimensionIndex(Axes.X)
        xv = ds.dimension(xi)
        yi = ds.dimensionIndex(Axes.Y)
        yv = ds.dimension(yi)
        zi = ds.dimensionIndex(Axes.Z)
        zv = ds.dimension(zi)
        ti = ds.dimensionIndex(Axes.TIME)
        tv = ds.dimension(ti)
        ci = ds.dimensionIndex(Axes.CHANNEL)
        cv = ds.dimension(ci)

        dx = float(x_dim - xv) / 2.0
        dy = float(y_dim - yv) / 2.0
        dz = float(z_dim - zv) / 2.0
        print '\t     translation vector (dx, dy, dz) = (%f, %f, %f)' % (
            dx, dy, dz)

        if (dx != 0) or (dy != 0) or (dz != 0):
            stk = Views.translate(ds, long(dx), long(dy), long(0), long(dz))
            stk = Views.extendZero(stk)
        else:
            stk = Views.extendZero(ds.getImgPlus().getImg())

        print '\t     writing planes ',
        n = 0
        plane = 1
        byte_array = []
        interval_view = Views.interval(stk, \
                                       [Long(0), Long(0), Long(0), Long(0)], \
                                       [Long(x_dim - 1), Long(y_dim - 1), Long(c_dim - 1), Long(z_dim - 1)])
        cursor = interval_view.cursor()
        while cursor.hasNext():
            n += 1
            cursor.fwd()
            value = cursor.get().getInteger()
            bytes = DataTools.shortToBytes(value, big_endian)
            byte_array.extend(bytes)

            if n == pixels_per_plane:
                writer.saveBytes(plane - 1, byte_array)

                print '.',
                if ((plane) % 10) == 0:
                    print '\n\t                    ',

                byte_array = []
                plane += 1
                n = 0

        print ' '

    writer.close()
    t = datetime.now() - t_start
    print '\twrote %i planes to %s in %i sec.' % (plane - 1, result_path,
                                                  t.total_seconds())
    print '... done.'
Example #26
from ij import IJ
from ij.io import OpenDialog
from loci.formats import ImageReader
from loci.formats import MetadataTools

# open file
od = OpenDialog("Choose a file");
filepath = od.getPath()
print("Image path: " + filepath);

# use bio-formats to extract information
reader = ImageReader()
omeMeta = MetadataTools.createOMEXMLMetadata()
reader.setMetadataStore(omeMeta)
reader.setId(filepath)

seriesCount = reader.getSeriesCount()
print "Series count:",seriesCount
reader.close()


@String(label="Image Extension", style="", value=".tif") str_img_ext
file = str_dir + "/" + str_img_nam + str_img_ext
# read in and display ImagePlus object(s)
from loci.plugins import BF
from loci.formats import ImageReader
from loci.formats import MetadataTools
from ij import IJ
from ome.units import UNITS

imps = BF.openImagePlus(file)
for imp in imps:
    imp.show()
    reader = ImageReader()
    omeMeta = MetadataTools.createOMEXMLMetadata()
    reader.setMetadataStore(omeMeta)
    reader.setId(file)
    seriesCount = reader.getSeriesCount()
    reader.close()
    # print out series count from two different places (they should always match!)
    imageCount = omeMeta.getImageCount()
    IJ.log("Total # of image series (from BF reader): " + str(seriesCount))
    IJ.log("Total # of image series (from OME metadata): " + str(imageCount))
    physSizeX = omeMeta.getPixelsPhysicalSizeX(0)
    physSizeY = omeMeta.getPixelsPhysicalSizeY(0)
    physSizeZ = omeMeta.getPixelsPhysicalSizeZ(0)
    IJ.log("Physical calibration:")
    if (physSizeX is not None):
        IJ.log("\tX = " + str(physSizeX.value()) + " " + physSizeX.unit().getSymbol()
               + " = " + str(physSizeX.value(UNITS.MICROM)) + " microns")
    if (physSizeY is not None):
        IJ.log("\tY = " + str(physSizeY.value()) + " " + physSizeY.unit().getSymbol()
               + " = " + str(physSizeY.value(UNITS.MICROM)) + " microns")
Example #28
def readczi(imagefile,
            stitchtiles=True,
            setflatres=False,
            readpylevel=0,
            setconcat=True,
            openallseries=True,
            showomexml=False,
            attach=False,
            autoscale=True):

    log.info('Filename : ' + imagefile)

    metainfo = {}
    # check the file extension
    metainfo['Extension'] = MiscTools.getextension(MiscTools.splitext_recurse(imagefile))
    log.info('Detected File Extension : ' + metainfo['Extension'])

    # initialize the reader and get the OME metadata
    reader = ImageReader()
    omeMeta = MetadataTools.createOMEXMLMetadata()
    #metainfo['ImageCount_OME'] = omeMeta.getImageCount()
    reader.setMetadataStore(omeMeta)
    reader.setId(imagefile)
    metainfo['SeriesCount_BF'] = reader.getSeriesCount()
    reader.close()

    # get the scaling for XYZ
    physSizeX = omeMeta.getPixelsPhysicalSizeX(0)
    physSizeY = omeMeta.getPixelsPhysicalSizeY(0)
    physSizeZ = omeMeta.getPixelsPhysicalSizeZ(0)

    if physSizeX is not None:
        metainfo['ScaleX'] = round(physSizeX.value(), 3)
        metainfo['ScaleY'] = round(physSizeY.value(), 3)

    if physSizeX is None:
        metainfo['ScaleX'] = None
        metainfo['ScaleY'] = None

    if physSizeZ is not None:
        metainfo['ScaleZ'] = round(physSizeZ.value(), 3)
    if physSizeZ is None:
        metainfo['ScaleZ'] = None

    options = DynamicMetadataOptions()
    options.setBoolean("zeissczi.autostitch", stitchtiles)
    options.setBoolean("zeissczi.attachments", attach)

    czireader = ZeissCZIReader()
    czireader.setFlattenedResolutions(setflatres)
    czireader.setMetadataOptions(options)
    czireader.setId(imagefile)

    # Set the preferences in the ImageJ plugin
    # Note although these preferences are applied, they are not refreshed in the UI
    Prefs.set("bioformats.zeissczi.allow.autostitch", str(stitchtiles).lower())
    Prefs.set("bioformats.zeissczi.include.attachments", str(attach).lower())

    # metainfo = {}
    metainfo['rescount'] = czireader.getResolutionCount()
    metainfo['SeriesCount_CZI'] = czireader.getSeriesCount()
    #metainfo['flatres'] = czireader.hasFlattenedResolutions()
    #metainfo['getreslevel'] = czireader.getResolution()

    # Dimensions
    metainfo['SizeT'] = czireader.getSizeT()
    metainfo['SizeZ'] = czireader.getSizeZ()
    metainfo['SizeC'] = czireader.getSizeC()
    metainfo['SizeX'] = czireader.getSizeX()
    metainfo['SizeY'] = czireader.getSizeY()

    # check for autostitching and possibility to read attachment
    metainfo['AllowAutoStitching'] = czireader.allowAutostitching()
    metainfo['CanReadAttachments'] = czireader.canReadAttachments()

    # read in and display ImagePlus(es) with arguments
    options = ImporterOptions()
    options.setOpenAllSeries(openallseries)
    options.setShowOMEXML(showomexml)
    options.setConcatenate(setconcat)
    options.setAutoscale(autoscale)
    options.setId(imagefile)

    # open the ImgPlus
    imps = BF.openImagePlus(options)
    metainfo['Pyramid Level Output'] = readpylevel + 1

    try:
        imp = imps[readpylevel]
        pylevelout = metainfo['SeriesCount_CZI']
    except:
        # fallback option
        log.info('PyLevel=' + str(readpylevel) + ' does not exist.')
        log.info('Using Pyramid Level = 0 as fallback.')
        imp = imps[0]
        pylevelout = 0
        metainfo['Pyramid Level Output'] = pylevelout

    # get the stack and some info
    imgstack = imp.getImageStack()
    metainfo['Output Slices'] = imgstack.getSize()
    metainfo['Output SizeX'] = imgstack.getWidth()
    metainfo['Output SizeY'] = imgstack.getHeight()

    # calc scaling in case of pyramid
    scale = float(metainfo['SizeX']) / float(metainfo['Output SizeX'])
    metainfo['Pyramid Scale Factor'] = scale
    metainfo['ScaleX Output'] = metainfo['ScaleX'] * scale
    metainfo['ScaleY Output'] = metainfo['ScaleY'] * scale

    # set the correct scaling
    imp = MiscTools.setscale(imp, scaleX=metainfo['ScaleX Output'],
                             scaleY=metainfo['ScaleY Output'],
                             scaleZ=metainfo['ScaleZ'],
                             unit="micron")

    # close czireader
    czireader.close()

    return imp, metainfo
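# Minimal usage sketch for readczi() above; the file path is an assumption and the
# `log` object is assumed to be the same logger configured for readczi().
imagefile = '/data/sample_mosaic.czi'
imp, metainfo = readczi(imagefile, stitchtiles=True, readpylevel=0)
imp.show()
for key in sorted(metainfo):
    log.info(key + ' : ' + str(metainfo[key]))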
Example #29
0
def get_reader(file, complete_meta):
    reader = ImageReader()
    reader.setMetadataStore(complete_meta)
    reader.setId(file)
    return reader
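# Minimal usage sketch for get_reader(); the file path is an assumption.
complete_meta = MetadataTools.createOMEXMLMetadata()
reader = get_reader('/data/timepoint_000.ome.tif', complete_meta)
print('Series count: ' + str(reader.getSeriesCount()))
reader.close()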
Example #30
0
for root, dirs, files in os.walk(inDir):
    for file in files:
        if file.endswith(fileExt):
            logging.info('Starting image #%i (%s)', imageCount, str(file))
            options = ImporterOptions()
            options.setAutoscale(True)
            options.setId(os.path.join(root, file))
            options.setSplitChannels(True)
            imps = BF.openImagePlus(options)
            imageCount += 1
            for imp in imps:
                reader = ImageReader()
                omeMeta = MetadataTools.createOMEXMLMetadata()
                reader.setMetadataStore(omeMeta)
                reader.setId(os.path.join(root, file))

                filename = str(imp)
                channel_id = int(re.findall(r"C=(\d)", filename)[0])
                channel_name = omeMeta.getChannelName(0, channel_id)
                out_name = filename.split('"')[1]
                out_name = out_name.split(fileExt)[0] + "_" + str(channel_name)
                out_name = out_name.replace(" ", "")

                physSizeX = omeMeta.getPixelsPhysicalSizeX(0)
                physSizeY = omeMeta.getPixelsPhysicalSizeY(0)
                physSizeZ = omeMeta.getPixelsPhysicalSizeZ(0)
                stackSizeX = omeMeta.getPixelsSizeX(0).getValue()
                stackSizeY = omeMeta.getPixelsSizeY(0).getValue()
                stackSizeZ = omeMeta.getPixelsSizeZ(0).getValue()
                logging.info('    Saving under: %s', out_name)
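                # Hypothetical continuation: save each split channel as a TIFF and release resources.
                # outDir and the save location are assumptions, not part of the original loop.
                from ij.io import FileSaver
                outDir = root  # assumption: write next to the source file
                FileSaver(imp).saveAsTiff(os.path.join(outDir, out_name + '.tif'))
                imp.close()
                reader.close()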
Example #31
0
def process_time_points(root, files, outdir):
	'''Concatenate images and write an ome.tiff file. If an image already contains multiple time points, just copy the image.'''
	concat = 1
	files.sort()
	options = ImporterOptions()
	options.setId(files[0])
	options.setVirtual(1)
	image = BF.openImagePlus(options)
	image = image[0]
	if image.getNFrames() > 1:
		IJ.log(files[0] + " Contains multiple time points. Can only concatenate single time points! Don't do anything!")
		image.close()
		return
	
	width  = image.getWidth()
	height = image.getHeight()
	for patt in pattern:
		outName = re.match(patt, os.path.basename(files[0]))
		if outName is None:
			continue
		if outdir is None:
			outfile = os.path.join(root, outName.group(1) + '.ome.tif')
		else:
			outfile =  os.path.join(outdir, outName.group(1) + '.ome.tif')
		reader = ImageReader()
		reader.setMetadataStore(MetadataTools.createOMEXMLMetadata())
		reader.setId(files[0])
		timeInfo = []
		omeOut = reader.getMetadataStore()
		omeOut = setUpXml(omeOut, image, files)
		reader.close()
		image.close()
		IJ.log ('Concatenates ' + os.path.join(root, outName.group(1) + '.ome.tif'))
		itime = 0
		try:
			for ifile, fileName in enumerate(files):
				print fileName
				omeMeta = MetadataTools.createOMEXMLMetadata()
	
				reader.setMetadataStore(omeMeta)
				reader.setId(fileName)
				#print omeMeta.getPlaneDeltaT(0,0)
				#print omeMeta.getPixelsTimeIncrement(0)
				
				if fileName.endswith('.czi'):
					if ifile == 0:
						T0 = omeMeta.getPlaneDeltaT(0,0).value()
					dT = omeMeta.getPlaneDeltaT(0,0).value() - T0
					unit =  omeMeta.getPlaneDeltaT(0,0).unit()
				else:
					timeInfo.append(getTimePoint(reader, omeMeta))
					unit = omeMeta.getPixelsTimeIncrement(0).unit()
					try:
						dT = round(timeInfo[files.index(fileName)]-timeInfo[0],2)
					except:
						dT = (timeInfo[files.index(fileName)]-timeInfo[0]).seconds
				
				nrImages = reader.getImageCount()
	
	
				for i in range(0, reader.getImageCount()):
	
					try:
						omeOut.setPlaneDeltaT(dT, 0, i + itime*nrImages)
					except TypeError:
						omeOut.setPlaneDeltaT(Time(dT, unit),0, i + itime*nrImages)
					omeOut.setPlanePositionX(omeOut.getPlanePositionX(0,i), 0, i + itime*nrImages)
					omeOut.setPlanePositionY(omeOut.getPlanePositionY(0,i), 0, i + itime*nrImages)
					omeOut.setPlanePositionZ(omeOut.getPlanePositionZ(0,i), 0, i + itime*nrImages)
					omeOut.setPlaneTheC(omeOut.getPlaneTheC(0,i), 0, i + itime*nrImages)
					omeOut.setPlaneTheT(NonNegativeInteger(itime), 0, i + itime*nrImages)
					omeOut.setPlaneTheZ(omeOut.getPlaneTheZ(0,i), 0, i + itime*nrImages)
				itime = itime + 1
				reader.close()
	
				IJ.showProgress(files.index(fileName), len(files))
			try:
				incr = float(dT/(len(files)-1))
			except:
				incr = 0
			
			try:
				omeOut.setPixelsTimeIncrement(incr, 0)
			except TypeError:
				#new Bioformats >5.1.x
				omeOut.setPixelsTimeIncrement(Time(incr, unit),0)
			
			outfile = concatenateImagePlus(files, outfile)
			if outfile is not None:
				filein = RandomAccessInputStream(outfile)
				fileout = RandomAccessOutputStream(outfile)
				saver = TiffSaver(fileout, outfile)
				saver.overwriteComment(filein,omeOut.dumpXML())
				fileout.close()
				filein.close()
	
	
		except:
			traceback.print_exc()
		finally:
			#close all possible open files
			try:
				reader.close()
			except:
				pass
			try:
				filein.close()
			except:
				pass
			try:
				fileout.close()
			except:
				pass
Example #32
0
outdir = sipmm_outputFile.getAbsolutePath()

#imps = BF.openImagePlus()

from ij import IJ
if debugging:
	IJ.run("Close All")

from loci.formats import ImageReader
from loci.formats import MetadataTools
from loci.plugins.in import ImporterOptions
reader = ImageReader()
omeMeta = MetadataTools.createOMEXMLMetadata()
reader.setMetadataStore(omeMeta)
reader.setId(sipmm_inputFile.getAbsolutePath())
seriesCount = reader.getSeriesCount()
reader.close()

log('Found {} series'.format(seriesCount))

outfile = os.path.join(outdir,'results.csv')
h = 'Name,path,Rarea,Rmean,Rstd,Garea,Gmean,Gstd,GQarea,GQmean,GQintden,GQstd,nPunctae,RMregions,maxp,extravar'
with open(outfile,'a') as of:
		of.write(h+'\n')

for impi in range(seriesCount):
	log('Analyzing series {}/{}...'.format(impi+1,seriesCount))
	options = ImporterOptions()
	options.setId(sipmm_inputFile.getAbsolutePath())
	options.clearSeries()
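	# Sketch of how a single series is typically opened after clearSeries();
	# setSeriesOn() is an existing ImporterOptions method, the title handling is an assumption.
	options.setSeriesOn(impi, True)
	imps = BF.openImagePlus(options)
	imp = imps[0]
	imp.setTitle('series_' + str(impi + 1))
	imp.show()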
def processFile():
	# start logging
	IJ.log("\n______________________________\n\n\t\tOlympus DM correction\n\t\tVersion " + pluginVersion +"\n______________________________\n")

	# ask user for file
	ofd = OpenDialog("Choose a file", None)  
	filename = ofd.getFileName()  
  
	if filename is None:
		IJ.log("User canceled the dialog!\nImage processing canceled!\n")
		return

	directory = ofd.getDirectory()
	filepath = directory + filename
	IJ.log("File path: " + filepath)

	if not filename.endswith(".oir"):
		IJ.log("Not an Olympus (.oir) file.\nNo image to process.\n")
		return

	filenameExExt = os.path.splitext(filename)[0]
      
	# parse metadata
	reader = ImageReader()
	omeMeta = MetadataTools.createOMEXMLMetadata()
	reader.setMetadataStore(omeMeta)
	reader.setId(filepath)
	numChannels = reader.getSizeC()
	numSlices = reader.getSizeZ()
	numFrames = reader.getSizeT()
	seriesCount = reader.getSeriesCount()

	globalMetadata = reader.getGlobalMetadata()
	seriesMetadata = reader.getSeriesMetadata()

	objLensName = globalMetadata['- Objective Lens name #1']

	areaRotation = float(seriesMetadata['area rotation #1'])
	acquisitionValueRotation = float(seriesMetadata['acquisitionValue rotation #1'])
	if 'regionInfo rotation #1' in seriesMetadata:
		regionInfoRotation = float(seriesMetadata['regionInfo rotation #1'])
	else:
		regionInfoRotation = float(0)

	totalRotation = areaRotation + regionInfoRotation
	physSizeX = omeMeta.getPixelsPhysicalSizeX(0)
	physSizeY = omeMeta.getPixelsPhysicalSizeY(0)
	pxSizeX = physSizeX.value(UNITS.MICROM)
	pxSizeY = physSizeY.value(UNITS.MICROM)

	# log metadata
	IJ.log("\nMETADATA")
	#IJ.log("Filename: " + filepath)
	IJ.log("Number of series: " + str(seriesCount))
	IJ.log("Number of channels: " + str(numChannels))
	IJ.log("Number of frames: " + str(numFrames))
	IJ.log("Number of slices: " + str(numSlices))
	IJ.log("Objective lens: " + objLensName)
	IJ.log("FOV rotation: " + str(areaRotation))
	IJ.log("ROI rotation: " + str(regionInfoRotation))
	IJ.log("Total rotation: " + str(totalRotation))
	IJ.log("Pixel size:")
	IJ.log("\t\tX = " + str(physSizeX.value()) + " " + physSizeX.unit().getSymbol())
	IJ.log("\t\tY = " + str(physSizeY.value()) + " " + physSizeY.unit().getSymbol())

	# ask user to identify dichroic mirror used for each channel  
	gdDM = GenericDialog("Dichroic mirrors")
	DMs = ["DM1", "DM2", "DM3", "DM4", "DM5"] 
	for i in range(numChannels):
		gdDM.addChoice("Channel " + str(i+1), DMs, DMs[0])
	gdDM.addCheckbox("Merge channels", False) 
	gdDM.showDialog()
	if gdDM.wasCanceled():
		IJ.log("User canceled the dialog!\nImage processing canceled!\n")
		return
	dichroics = []
	for i in range(numChannels):
		dichroics.append(gdDM.getNextChoice())
	merge = gdDM.getNextBoolean()
	IJ.log("\nUser selected dichroic mirrors")
	for i in range(numChannels):
		IJ.log("\t\tChannel " + str(i+1) + ": " + dichroics[i])	

	if merge:
		channels = []
		chDict = {}
		for i in range(numChannels):
			chName = "Channel"+str(i+1)
			channels.append(chName)
			chDict[chName] = i
		channels.append("NONE")
		colourChoices = ["red", "green", "blue", "gray", "cyan", "magenta", "yellow"]
		gdMerge = GenericDialog("Merge channels")
		for c in colourChoices:
			gdMerge.addChoice(c + ":", channels, channels[numChannels])
		gdMerge.showDialog()
		if gdMerge.wasCanceled():
			IJ.log("User canceled the dialog!\nImage processing canceled!\n")
			return
		IJ.log("\nUser selected channel colours")
		mergeList = []
		for i in range(len(colourChoices)):
			ch = gdMerge.getNextChoice()
			if ch == "NONE":
				mergeList.append(None)
			else:
				mergeList.append(chDict[ch])
				IJ.log("\t\t" + colourChoices[i] + ": " + ch)

	# ask user for an output directory
	dc = DirectoryChooser("Choose folder for output")  
	od = dc.getDirectory()    
	if od is None:  
		IJ.log("User canceled the dialog!\nImage processing canceled!\n")  
		return  
  
	if merge:
		tifDir = od + "." + str(datetime.now()).replace(" ", "").replace(":", "") + "/"
		if not os.path.exists(tifDir):
			os.makedirs(tifDir)
			IJ.log("\nCreated temporary folder: " + tifDir + "\n")
		else:
			IJ.log("Unable to create temporary folder!\n")
	else:
		tifDir = od + filenameExExt + "/"
		if not os.path.exists(tifDir):
			os.makedirs(tifDir)
			IJ.log("\nCreated subfolder: " + tifDir + "\n")
		else:
			IJ.log("\nSubfolder " + tifDir +  " already exists")

	# correct images
	tifFilePaths = []
	for i in range(numChannels):
		ip = extractChannel(oirFile=filepath, ch=i)
		if dichroics[i] == "DM1":
			IJ.log("Channel " + str(i+1) + " was imaged using DM1, so no correction required.")
		else:
			offsets = getOffset(obj=objLensName,dm=dichroicDict[dichroics[i]])
			xom = offsets['x']
			yom = offsets['y']
			if abs(totalRotation) > 0.1:
				rotOff = rotateOffset(x=xom, y=yom, angle=-totalRotation)
				xom = rotOff['x']
				yom = rotOff['y']
			xop = int(round(xom/pxSizeX))
			yop = int(round(yom/pxSizeY))
			IJ.log("Channel " + str(i+1) + " offsets")
			IJ.log("\t\tMicrometres")
			IJ.log("\t\t\t\tx = " + str(xom))
			IJ.log("\t\t\t\ty = " + str(yom))
			IJ.log("\t\tPixels")
			IJ.log("\t\t\t\tx = " + str(xop))
			IJ.log("\t\t\t\ty = " + str(yop))
			IJ.run(ip, "Translate...", "x=" + str(-xop) + " y=" + str(-yop) + " interpolation=None stack")

		tifFilePath = tifDir + filenameExExt + "_ch_"+str(i+1)+".tif"
		tifFilePaths.append(tifFilePath)
		if os.path.exists(tifFilePath):
			IJ.log("\nOutput file exists: " + tifFilePath)
			IJ.log("Rerun plugin choosing a different output folder")
			IJ.log("or delete file and then rerun plugin.")
			IJ.log("Image processing terminated!\n")
			return
		FileSaver(ip).saveAsTiff(tifFilePath)

	if merge:
		for i in range(len(mergeList)):
			if mergeList[i] is not None:
				mergeList[i] = readSingleChannelImg(tifFilePaths[mergeList[i]])
		merged = RGBStackMerge.mergeChannels(mergeList, False)
		mergedChannelFilepath = od + filenameExExt + ".tif"
		if os.path.exists(mergedChannelFilepath):
			IJ.log("\nOutput file exists: " + mergedChannelFilepath)
			IJ.log("Rerun plugin choosing a different output folder")
			IJ.log("or delete file and then rerun plugin.")
			IJ.log("Image processing terminated!\n")
		FileSaver(merged).saveAsTiff(mergedChannelFilepath)
		for tf in tifFilePaths:
			os.remove(tf)
		os.rmdir(tifDir)
			
	IJ.log("\nFinished processing file:\n" + filepath + "\n")
	if merge:
		IJ.log("Image file with channels aligned:\n" + od + filenameExExt + ".tif\n")
	else:
		IJ.log("Aligned images (one tiff file for each channel) can be found in:\n" + tifDir + "\n")
def processDirectory():
	# start logging
	IJ.log("\n______________________________\n\n\t\tOlympus DM correction\n\t\tVersion " + pluginVersion +"\n______________________________\n")

	# ask user for an input directory
	dc = DirectoryChooser("Choose folder containing Olympus (.oir) files")  
	inputDir = dc.getDirectory() 
	if inputDir is None:  
		IJ.log("User canceled the dialog!\nImage processing canceled!\n")  
		return  
	IJ.log("\nInput directory: " + inputDir + "\n")

	oirFiles = []
	for f in os.listdir(inputDir):
		if f.endswith(".oir"):
			oirFiles.append(f)

	if len(oirFiles) < 1:
		IJ.log("Input directory does not contain any Olympus (.oir) files.\nNo images to process.\n")
		return
  	
	# find out how many channels are in first file (we will assume all files have same number of channels and were acquired using same DMs)
	reader = ImageReader()
	omeMeta = MetadataTools.createOMEXMLMetadata()
	reader.setMetadataStore(omeMeta)
	reader.setId(inputDir + oirFiles[0])
	numChannels = reader.getSizeC()

	# ask user to identify dichroic mirror used for each channel  
	gdDM = GenericDialog("Dichroic mirrors")
	DMs = ["DM1", "DM2", "DM3", "DM4", "DM5"] 
	for i in range(numChannels):
		gdDM.addChoice("Channel " + str(i+1), DMs, DMs[0])
	gdDM.addCheckbox("Merge channels", False) 
	gdDM.showDialog()
	if gdDM.wasCanceled():
		IJ.log("User canceled the dialog!\nImage processing canceled!\n")
		return
	dichroics = []
	for i in range(numChannels):
		dichroics.append(gdDM.getNextChoice())
	merge = gdDM.getNextBoolean()
	IJ.log("User selected dichroic mirrors")
	for i in range(numChannels):
		IJ.log("\t\tChannel " + str(i+1) + ": " + dichroics[i])
	IJ.log("\n")

	if merge:
		channels = []
		chDict = {}
		for i in range(numChannels):
			chName = "Channel"+str(i+1)
			channels.append(chName)
			chDict[chName] = i
		channels.append("NONE")
		colourChoices = ["red", "green", "blue", "gray", "cyan", "magenta", "yellow"]
		gdMerge = GenericDialog("Merge channels")
		for c in colourChoices:
			gdMerge.addChoice(c + ":", channels, channels[numChannels])
		gdMerge.showDialog()
		if gdMerge.wasCanceled():
			IJ.log("User canceled the dialog!\nImage processing canceled!\n")
			return
		IJ.log("User selected channel colours")
		usersMergeList = []
		for i in range(len(colourChoices)):
			ch = gdMerge.getNextChoice()
			if ch == "NONE":
				usersMergeList.append(None)
			else:
				usersMergeList.append(chDict[ch])
				IJ.log("\t\t" + colourChoices[i] + ": " + ch)
		IJ.log("\n\n")

	# ask user for an output directory
	dc = DirectoryChooser("Choose folder for output")  
	outputDir = dc.getDirectory()    
	if outputDir is None:  
		IJ.log("User canceled the dialog!\nImage processing canceled!\n")  
		return  

	counter = 0
	totalFiles = len(oirFiles)
	for o in oirFiles:
		counter +=1
		IJ.log("Processing file " + str(counter) + " of " + str(totalFiles) + "\n")
		IJ.log("File path: " + inputDir + o)
		if merge:
			ml = usersMergeList[:]
		else:
			ml = None
		processFile(o, inputDir, outputDir, dichroics, ml)
		IJ.log("\n--------------------------\n")
Example #35
0
width = r.getSizeX()
height = r.getSizeY()
md = r.getGlobalMetadata()
# print(type(md))
# print(num, width, height)
stack = ImageStack(width, height)
i = 0
ip = r.openProcessors(i)[0]
stack.addSlice("1", ip)
imp = ImagePlus("foo", stack)
r.close()
imp.show()
IJ.run("Enhance Contrast", "saturated=0.35")

imageReader = ImageReader()
meta = MetadataTools.createOMEXMLMetadata()
imageReader.setMetadataStore(meta)
imageReader.setId(filePath)
pSizeX = meta.getPixelsPhysicalSizeX(0)
pSizeY = meta.getPixelsPhysicalSizeY(0)
imageReader.close()
print(pSizeX, pSizeY)
print(meta.getPixelsSizeX(0))
print(meta.getPixelsSizeY(0))






Example #36
0
def get_reader(file, complete_meta):
	reader = ImageReader()
	reader.setMetadataStore(complete_meta)
	reader.setId(file)
	return reader
def initreader(vsi_path):
    reader = ImageReader()
    omeMeta = MetadataTools.createOMEXMLMetadata()
    reader.setMetadataStore(omeMeta)
    reader.setId(vsi_path)
    return reader
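# Minimal usage sketch for initreader(); the .vsi path is an assumption.
vsi_reader = initreader('/data/slide_01.vsi')
print('Series in file: ' + str(vsi_reader.getSeriesCount()))
vsi_reader.close()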
	def readZeissHeader(self, infoStr):		
		# This is incredibly difficult to get working as (date, time, voxels) are in different obscure places in lsm and czi
		# Furthermore, just trying to read the raw ome xml is futile
		#
		# parsing ome xml as a string and searching it with regular expressions (re) does not work
		# it is beyond the scope of my work to figure this out
		# the fact that it does not work and there is little documentation is a pretty big waste of time
		#
		# get and parse xml to find date/time
		#fi = self.imp.getOriginalFileInfo(); # returns a FileInfo object
		#omexml = fi.description #omexml is a string
		#omexml = omexml.encode('utf-8')
		#omexml = omexml.replaceAll("[^\\x20-\\x7e]", "") # see: https://stackoverflow.com/questions/2599919/java-parsing-xml-document-gives-content-not-allowed-in-prolog-error

		# (1) try and search the ome xml like a string, this gives errors
		#docsPattern = '<AcquisitionDate>.*</AcquisitionDate>'
		#searchresult = re.search(docsPattern, omexml)
		#print 'searchresult:', searchresult.group(0)
		
		# 2) treat the ome xml like any other xml (because it's xml, right?)
		# well this raises errors too
		#omexml has <AcquisitionDate>2016-08-17T15:21:50</AcquisitionDate>
		#import xml.etree.ElementTree
		#e = xml.etree.ElementTree.fromstring(omexml).getroot()		#print omexml
		#for atype in e.findall('AcquisitionDate'):
		#    print 'AcquisitionDate:', atype #.get('foobar')
		#
		#

		if self.islsm:
			# lsm have date hidden in omeMeta.getImageAcquisitionDate(0)
			# this is copied from code at: https://gist.github.com/ctrueden/6282856
			reader = ImageReader()
			omeMeta = MetadataTools.createOMEXMLMetadata() #omeMeta.getImageAcquisitionDate(0)
			reader.setMetadataStore(omeMeta)
			reader.setId(self.filepath)
			#seriesCount = reader.getSeriesCount()
			dateTimeStr = omeMeta.getImageAcquisitionDate(0) #2016-08-17T16:36:26
			reader.close()
			if dateTimeStr:
				self.dateStr, self.timeStr = dateTimeStr.toString().split('T')
				self.dateStr = bFixDate(self.dateStr)
				self.timeStr = bFixTime(self.timeStr)
				#bPrintLog('LSM date/time is: ' + self.dateStr + ' ' + self.timeStr, 3)
			else:
				bPrintLog('WARNING: did not get Zeiss date/time string')

			# lsm have voxels in infoStr
			for line in infoStr.split('\n'):
				#print line
				if line.find('VoxelSizeX') != -1:
					self.voxelx = float(line.split('=')[1])
				if line.find('VoxelSizeY') != -1:
					self.voxely = float(line.split('=')[1])
				if line.find('VoxelSizeZ') != -1:
					self.voxelz = float(line.split('=')[1])
				if line.find('SizeC') != -1:
					self.numChannels = int(line.split('=')[1])
				#if line.find('BitsPerPixel') and not line.startswith('Experiment') != -1: # 20170811, startswith is for czi
				#	self.bitsPerPixel = int(line.split('=')[1])
				if line.find('RecordingZoomX#1') != -1:
					self.zoom = int(line.split('=')[1])

		if self.isczi:
			# czi has date/time in infoStr (lsm does not)
			for line in infoStr.split('\n'):
				if line.find('CreationDate #1') != -1: # w.t.f. is #1 referring to?
					lhs, rhs = line.split('=')
					rhs = rhs.replace('  ', ' ')
					if rhs.startswith(' '):
						rhs = rhs[1:-1]
					#print "lhs: '" + lhs + "'" + "rhs: '" + rhs + "'"
					if rhs.find('T') != -1:
						self.dateStr, self.timeStr = rhs.split('T')
					else:
						self.dateStr, self.timeStr = rhs.split(' ')
					self.dateStr = bFixDate(self.dateStr)
					self.timeStr = bFixTime(self.timeStr)
					#bPrintLog('CZI date/time is: ' + self.dateStr + ' ' + self.timeStr, 3)
				# .czi
				# <Pixels BigEndian="false" DimensionOrder="XYCZT" ID="Pixels:0" Interleaved="false" PhysicalSizeX="0.20756645602494875" PhysicalSizeXUnit="µm" PhysicalSizeY="0.20756645602494875" PhysicalSizeYUnit="µm" PhysicalSizeZ="0.75" PhysicalSizeZUnit="µm" SignificantBits="8" SizeC="1" SizeT="1" SizeX="1024" SizeY="1024" SizeZ="50" Type="uint8">

			# czi have voxel in calibration
			self.voxelx = self.imp.getCalibration().pixelWidth
			self.voxely = self.imp.getCalibration().pixelHeight
			self.voxelz = self.imp.getCalibration().pixelDepth
			#bPrintLog('readZeissHeader() read czi scale as: ' + str(self.voxelx) + ' ' + str(self.voxely) + ' ' + str(self.voxelz), 3)

			# CLEARING self.infoStr for CZI ... it was WAY too big to parse in Map Manager
			self.infoStr = ''
Example #39
0
def get_lif_series_length(fpath):
    reader = ImageReader()
    reader.setId(fpath)
    series_count = reader.getSeriesCount()
    reader.close()  # release the file handle before returning
    return series_count
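# Minimal usage sketch; the .lif path is an assumption.
n_series = get_lif_series_length('/data/experiment_01.lif')
print('Number of series in the LIF file: ' + str(n_series))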