Example no. 1
def get_calibration_from_metadata(path_to_image):
    """get the pixel calibration from a given image using Bio-Formats

    Parameters
    ----------
    path_to_image : str
        full path to the input image

    Returns
    -------
    list
        the physical pixel size (x, y, z) as floats
    """
    reader = ImageReader()
    omeMeta = MetadataTools.createOMEXMLMetadata()
    reader.setMetadataStore(omeMeta)
    reader.setId(str(path_to_image))

    physSizeX = omeMeta.getPixelsPhysicalSizeX(0)
    physSizeY = omeMeta.getPixelsPhysicalSizeY(0)
    physSizeZ = omeMeta.getPixelsPhysicalSizeZ(0)
    image_calibration = [physSizeX.value(), physSizeY.value(), physSizeZ.value()]
    reader.close()

    return image_calibration
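A minimal usage sketch for get_calibration_from_metadata(), assuming a Fiji/Jython script with the Bio-Formats classes imported (the file path is only an example):

from loci.formats import ImageReader, MetadataTools

cal_x, cal_y, cal_z = get_calibration_from_metadata('/path/to/stack.ome.tif')
print("pixel calibration (x, y, z): %s, %s, %s" % (cal_x, cal_y, cal_z))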
def Z1_metadata(sourcefile):
    # Access the header of Z1 lightsheet data to determine the number of views (series)
    reader = ImageReader()
    omeMeta = MetadataTools.createOMEXMLMetadata()
    reader.setMetadataStore(omeMeta)
    reader.setId(sourcefile)
    seriesCount = reader.getSeriesCount()
    reader.close()
    return seriesCount
Example no. 4
def getFrameIntervalFromImage(image_path):
    from loci.formats import ImageReader
    from loci.formats import MetadataTools
    r = ImageReader()
    meta = MetadataTools.createOMEXMLMetadata()
    r.setMetadataStore(meta)
    r.setId(image_path)
    frame_interval = meta.getPixelsTimeIncrement(0).value()
    r.close()
    log("Detected frame interval: %s (%s)" % (frame_interval, image_path))
    return frame_interval
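Note that getPixelsTimeIncrement() returns a quantity carrying a unit rather than a bare number, and it can be None when the file has no time information. A minimal sketch that also reports the unit (the helper name is an assumption, not part of the original scripts):

from loci.formats import ImageReader, MetadataTools

def get_frame_interval_with_unit(image_path):
    # return (value, unit symbol) of the frame interval, or (None, None) if absent
    reader = ImageReader()
    meta = MetadataTools.createOMEXMLMetadata()
    reader.setMetadataStore(meta)
    reader.setId(image_path)
    increment = meta.getPixelsTimeIncrement(0)  # ome.units.quantity.Time or None
    reader.close()
    if increment is None:
        return None, None
    return increment.value(), increment.unit().getSymbol()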
Example no. 5
def meta_parser():
    """ Iterates through .lif XML/OME metadata, returns selected values eg. timepoints, channels, series count, laser power.. """

    # Get metadata.
    reader = ImageReader()
    omeMeta = MetadataTools.createOMEXMLMetadata()
    reader.setMetadataStore(omeMeta)
    reader.setId(str(Experiment))

    # Extracts number of image series, channel number
    seriesCount = reader.getSeriesCount()
    channels = reader.getSizeC()
    reader.close()

    # Number of images
    imageCount = omeMeta.getImageCount()

    # Image size in pixels AND microns (for scalebar).
    Physical_x = omeMeta.getPixelsPhysicalSizeX(0)
    Pixel_x = omeMeta.getPixelsSizeX(0)
    Physical_x = Physical_x.value()
    Pixel_x = Pixel_x.getNumberValue()

    # Assumes square image (x=y).
    org_size = (Physical_x*Pixel_x)*2

    # Laser power of donor excitation laser.
    if channels == 3:
        LP = omeMeta.getChannelLightSourceSettingsAttenuation(0,0)
        LP = 1 - LP.getNumberValue()
    else:
        LP = 0

    # Acquisition timestamps for each image
    timelist = []
    for timepoint in range(imageCount):
        times = omeMeta.getImageAcquisitionDate(timepoint)
        timelist.append(times.toString())

    # Convert ISO timestamps (YYYY-MM-DDTHH:MM:SS) to minutes relative to the first acquisition.
    timelist = [time.mktime(time.strptime(times, u'%Y-%m-%dT%H:%M:%S')) for times in timelist]
    timelist_unsorted = [(times - timelist[0]) / 60 for times in timelist]

    timelist = sorted(timelist_unsorted)

    # Prints to log.
    IJ.log("Total # of image series (from BF reader): " + str(seriesCount))
    IJ.log("Total # of image series (from OME metadata): " + str(imageCount))
    IJ.log("Total # of channels (from OME metadata): " + str(channels))
    IJ.log("Laserpower (from OME metadata): " + str(LP))
    return channels, seriesCount, timelist, timelist_unsorted, LP, org_size
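meta_parser() relies on names defined elsewhere in the original script: the Experiment input file, IJ, the time module and the Bio-Formats classes. A sketch of the header such a Fiji/Jython script typically starts with (the script-parameter label is an assumption):

#@ File (label="Select the .lif experiment file") Experiment

import time
from ij import IJ
from loci.formats import ImageReader, MetadataTools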
def run_script():

	input_dir,input_path = get_path()

	inputMeta = MetadataTools.createOMEXMLMetadata()
	outputMeta = MetadataTools.createOMEXMLMetadata()
	reader = get_reader(input_path,inputMeta)
	outputMeta = set_metadata(inputMeta,outputMeta)

	tiles_dir = os.path.join(input_dir,"tiles")
	if not os.path.exists(tiles_dir):
		os.makedirs(tiles_dir)
		sizeZ = inputMeta.getPixelsSizeZ(0).getValue()
		sizeC = inputMeta.getPixelsSizeC(0).getValue()
		sizeT = inputMeta.getPixelsSizeT(0).getValue()
		for theT in range(sizeT):
			write_tiles(reader,tiles_dir,theT,sizeC,sizeZ,outputMeta)
		last_tile = tiles_dir + 'tile_%s.ome.tif'%(sizeT-1)
		while not os.path.exists(last_tile):
   			time.sleep(1)
	reader.close()
	run_stitching(tiles_dir)
	write_fused(tiles_dir,outputMeta)
Example no. 7
def get_metadata(imagefile, imageID=0):

    metainfo = {}

    # initialize the reader and get the OME metadata
    reader = ImageReader()
    omeMeta = MetadataTools.createOMEXMLMetadata()
    reader.setMetadataStore(omeMeta)
    reader.setId(imagefile)
    # the OME store is only populated during setId(), so query counts afterwards
    metainfo['ImageCount_OME'] = omeMeta.getImageCount()
    metainfo['SeriesCount_BF'] = reader.getSeriesCount()
    reader.close()

    # read dimensions TZCXY from OME metadata
    metainfo['SizeT'] = omeMeta.getPixelsSizeT(imageID).getValue()
    metainfo['SizeZ'] = omeMeta.getPixelsSizeZ(imageID).getValue()
    metainfo['SizeC'] = omeMeta.getPixelsSizeC(imageID).getValue()
    metainfo['SizeX'] = omeMeta.getPixelsSizeX(imageID).getValue()
    metainfo['SizeY'] = omeMeta.getPixelsSizeY(imageID).getValue()

    # store info about stack
    if metainfo['SizeZ'] == 1:
        metainfo['is3d'] = False
    elif metainfo['SizeZ'] > 1:
        metainfo['is3d'] = True

    # get the scaling for XYZ
    physSizeX = omeMeta.getPixelsPhysicalSizeX(0)
    physSizeY = omeMeta.getPixelsPhysicalSizeY(0)
    physSizeZ = omeMeta.getPixelsPhysicalSizeZ(0)

    if physSizeX is not None:
        metainfo['ScaleX'] = round(physSizeX.value(), 3)
        metainfo['ScaleY'] = round(physSizeY.value(), 3)
    else:
        metainfo['ScaleX'] = None
        metainfo['ScaleY'] = None

    if physSizeZ is not None:
        metainfo['ScaleZ'] = round(physSizeZ.value(), 3)
    else:
        metainfo['ScaleZ'] = None

    # sort the dictionary
    metainfo = OrderedDict(sorted(metainfo.items()))

    return metainfo
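A minimal usage sketch for get_metadata(), assuming the surrounding script already provides its imports (the file path is only an example):

from collections import OrderedDict
from loci.formats import ImageReader, MetadataTools

info = get_metadata('/path/to/image.czi')
for key, value in info.items():
    print("%s = %s" % (key, value))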
Example no. 8
def get_metadata(params):
    """get image metadata, either from the image file or from acquisition-time metadata"""
    if params.metadata_source == "Image metadata":
        try:
            reader = ImageReader()
            ome_meta = MetadataTools.createOMEXMLMetadata()
            reader.setMetadataStore(ome_meta)
            reader.setId(params.input_image_path)
            reader.close()
            params.setFrameInterval(
                ome_meta.getPixelsTimeIncrement(0).value())
            params.setIntervalUnit(
                ome_meta.getPixelsTimeIncrement(0).unit().getSymbol())
            params.setPixelPhysicalSize(
                ome_meta.getPixelsPhysicalSizeX(0).value())
            params.setPixelSizeUnit(
                ome_meta.getPixelsPhysicalSizeX(0).unit().getSymbol())
            params.setMetadataSourceFile(None)
        except Exception as e:
            print(e.message)
            mbui.warning_dialog([
                "There was a problem getting metadata from the image: ",
                e.message,
                "Please consider using acquisition metadata instead (click OK). ",
                "Or, quit the analysis run and investigate image metadata by hand. "
            ])
            params.setMetadataSource("Acquisition metadata")
    if params.metadata_source == "Acquisition metadata":
        od = OpenDialog('Choose acquisition metadata file...',
                        os.path.dirname(params.input_image_path), '*.txt')
        file_path = od.getPath()
        if file_path is None:
            raise IOError('no metadata file chosen')
        acq_metadata_dict = import_iq3_metadata(file_path)
        try:
            params.setFrameInterval(acq_metadata_dict['frame_interval'])
        except KeyError:
            params.setFrameInterval(1.0)
        try:
            params.setIntervalUnit(acq_metadata_dict['time_unit'])
        except KeyError:
            params.setIntervalUnit('frames')
        params.setPixelPhysicalSize(acq_metadata_dict['x_physical_size'])
        params.setPixelSizeUnit(acq_metadata_dict['x_unit'])
        params.setMetadataSourceFile(file_path)
    return params
Example no. 9
def load_ome_img(file_name):
    """

    :param file_name:
    :return:
    """
    imps = BF.openImagePlus(file_name)
    imag = imps[0]
    # parse metadata
    reader = ImageReader()
    omeMeta = MetadataTools.createOMEXMLMetadata()
    reader.setMetadataStore(omeMeta)
    reader.setId(file_name)
    print(omeMeta)
    reader.close()

    return (imag, omeMeta)
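A usage sketch for load_ome_img(), assuming BF is imported from loci.plugins alongside ImageReader and MetadataTools (the path is only an example):

imp, ome = load_ome_img('/path/to/image.ome.tif')
imp.show()
print("%d x %d px" % (ome.getPixelsSizeX(0).getValue(), ome.getPixelsSizeY(0).getValue()))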
Example no. 10
def time_parser():
    """ Iterates through timelapse,                           """
    """ outputs timepoints with corresponding seriesnames.    """
    """ - S. Grødem 2017                                      """
    
    # Get metadata.
    reader = ImageReader()
    omeMeta = MetadataTools.createOMEXMLMetadata()
    reader.setMetadataStore(omeMeta)
    reader.setId(str(Experiment))

    # Extract the number of image series
    seriesCount = reader.getSeriesCount()
    reader.close()

    # Gets timepoints, in minutes.
    timelist = []
    namelist = []
    
    for timepoint in range (seriesCount):
        times = omeMeta.getImageAcquisitionDate(timepoint)
        timelist.append(times.toString())
        namelist.append(omeMeta.getImageName(timepoint))

    # YY.MM... to minutes.
    timelist =[ time.mktime(time.strptime(times, u'%Y-%m-%dT%H:%M:%S')) for times in timelist ]
    timelist_unsorted =[ (times - timelist[0])/60 for times in timelist ]

    # Sort timepoints.
    timelist, namelist = zip(*sorted(zip(timelist_unsorted, namelist)))
    timelist = [round(float(i), 3) for i in timelist]

    # Output to IJ log
    images = zip(timelist, namelist)
    IJ.log("Series number: " + str(seriesCount))
    IJ.log("*"*15)
    
    for i in range(len(images)):
        IJ.log("Name: " + str(images[i][1]))
        IJ.log("Time: " + str(images[i][0]))
        IJ.log("-"*15)
def choose_series(filepath, params):
	"""if input file contains more than one image series (xy position), prompt user to choose which one to use"""
	# todo: if necessary (e.g. if lots of series), can improve thumbnail visuals based loosely on https://github.com/ome/bio-formats-imagej/blob/master/src/main/java/loci/plugins/in/SeriesDialog.java
	import_opts = ImporterOptions();
	import_opts.setId(filepath);
	
	reader = ImageReader();
	ome_meta = MetadataTools.createOMEXMLMetadata();
	reader.setMetadataStore(ome_meta);
	reader.setId(filepath);
	no_series = reader.getSeriesCount();
	if no_series == 1:
		return import_opts, params;
	else:
		series_names = [ome_meta.getImageName(idx) for idx in range(no_series)];
		dialog = GenericDialog("Select series to load...");
		dialog.addMessage("There are multiple series in this file! \n" + 
						"This is probably because there are multiple XY stage positions. \n " + 
						"Please choose which series to load: ");
		thumbreader = BufferedImageReader(reader);
		cbg = CheckboxGroup();
		for idx in range(no_series):
			p = Panel();
			p.add(Box.createRigidArea(Dimension(thumbreader.getThumbSizeX(), thumbreader.getThumbSizeY())));
			ThumbLoader.loadThumb(thumbreader, idx, p, True);
			dialog.addPanel(p);
			cb = Checkbox(series_names[idx], cbg, idx==0);
			p.add(cb);

		dialog.showDialog();
		if dialog.wasCanceled():
			raise KeyboardInterrupt("Run canceled");
		if dialog.wasOKed():
			selected_item = cbg.getSelectedCheckbox().getLabel();
			selected_index = series_names.index(selected_item);
			params.setSelectedSeriesIndex(selected_index);
			for idx in range(0, no_series):
				import_opts.setSeriesOn(idx, True) if (idx==selected_index) else import_opts.setSeriesOn(idx, False);
	reader.close();
	return import_opts, params
Example no. 12
def generate_ome_fromimc(imc_acquisition):
    """

    :param imc_acquisition:
    :return:
    """

    y, x, c = imc_acquisition.shape
    print(x, y, c)
    metadata = MetadataTools.createOMEXMLMetadata()
    filename = '/home/vitoz/temp/test.ome.tiff'
    MetadataTools.populateMetadata(metadata, 0, filename, True, "XYZTC",
                                   FormatTools.getPixelTypeString(6), x, y, 1,
                                   c, 1, 1)
    if imc_acquisition.origin == 'mcd':
        ac_id = imc_acquisition.image_ID
        meta_xml = et.XML(imc_acquisition.original_metadata)
        ns = '{' + meta_xml.tag.split('}')[0].strip('{') + '}'

        channel_xml = [
            channel_xml
            for channel_xml in meta_xml.findall(ns + 'AcquisitionChannel')
            if channel_xml.find(ns + 'AcquisitionID').text == ac_id
        ]

        ac_xml = [
            tx for tx in meta_xml.findall(ns + 'Acquisition')
            if tx.find(ns + 'ID').text == ac_id
        ][0]
        # AcquisitionDate = ac_xml.find(ns+'StartTimeStamp').text
        # Description = ac_xml.find(ns+'Description').text
        # AblationPower = ac_xml.find(ns + 'AblationPower').text
        # AblationDistanceBetweenShots = ac_xml.find(ns + 'AblationDistanceBetweenShots').text
        # AblationFrequency = ac_xml.find(ns + 'AblationFrequency').text
        # ROIID = ac_xml.find(ns + 'ROIID').text
        # OrderNumber = ac_xml.find(ns + 'OrderNumber').text
        # SignalType = ac_xml.find(ns + 'SignalType').text
        # DataStartOffset = ac_xml.find(ns + 'DataStartOffset').text
        # DataEndOffset = ac_xml.find(ns + 'DataEndOffset').text
        # StartTimeStamp = ac_xml.find(ns + 'StartTimeStamp').text
        # EndTimeStamp = ac_xml.find(ns + 'EndTimeStamp').text
        # SegmentDataFormat = ac_xml.find(ns + 'SegmentDataFormat').text
        # ValueBytes = ac_xml.find(ns + 'ValueBytes').text
        #
        # chan_order = [int(cxml.find(ns+'OrderNumber').text) for cxml in channel_xml]
        metadata.setImageID(ac_id, 0)
        metadata.setImageName(ac_id, 0)
        metadata.setPixelsDimensionOrder(DimensionOrder.XYCZT, 0)
        metadata.setPixelsSizeX(PositiveInteger(x), 0)
        metadata.setPixelsSizeY(PositiveInteger(y), 0)
        metadata.setPixelsSizeC(PositiveInteger(c), 0)
        metadata.setPixelsSizeZ(PositiveInteger(1), 0)
        metadata.setPixelsSizeT(PositiveInteger(1), 0)

        metadata.setPixelsPhysicalSizeX(Length(1, units.MICROM), 0)
        metadata.setPixelsPhysicalSizeY(Length(1, units.MICROM), 0)
        metadata.setPixelsPhysicalSizeZ(Length(1, units.MICROM), 0)

        metadata.setPixelsID(ac_id, 0)
        metadata.setPixelsType(PixelType.FLOAT, 0)
        metadata.setPixelsInterleaved(False, 0)

        # metadata.setTiffDataFirstC(NonNegativeInteger(0), 0, 0)
        # metadata.setTiffDataFirstZ(NonNegativeInteger(0), 0, 0)
        # metadata.setTiffDataFirstT(NonNegativeInteger(0), 0, 0)
        print(c)
        for i in range(c):
            metadata.setChannelSamplesPerPixel(PositiveInteger(1), 0, i)
        for cxml in channel_xml:
            cnr = int(cxml.find(ns + 'OrderNumber').text) - 3
            if cnr >= 0:
                name = cxml.find(ns + 'ChannelName').text
                label = cxml.find(ns + 'ChannelLabel')
                if label.text is None:
                    label = name
                else:
                    print(label.text)
                    label = label.text
                print(label)
                print(name)
                cid = '_'.join([label, name])
                cid = cid.strip('(').strip(')')
                name = name.replace('(', '').strip(')')
                metadata.setChannelFluor(name, 0, cnr)
                metadata.setChannelName(cid, 0, cnr)
                metadata.setChannelID(cid, 0, cnr)
        # for i in range(c):
        #     metadata.setPlaneTheC(NonNegativeInteger(i),0,i)
        #     metadata.setPlaneTheZ(NonNegativeInteger(0), 0, i)
        #     metadata.setPlaneTheT(NonNegativeInteger(0), 0, i)

        return metadata

    else:
        ac_id = imc_acquisition.image_ID
        metadata.setImageID(ac_id, 0)
        metadata.setImageName(ac_id, 0)
        metadata.setPixelsDimensionOrder(DimensionOrder.XYCZT, 0)
        metadata.setPixelsSizeX(PositiveInteger(x), 0)
        metadata.setPixelsSizeY(PositiveInteger(y), 0)
        metadata.setPixelsSizeC(PositiveInteger(c), 0)
        metadata.setPixelsSizeZ(PositiveInteger(1), 0)
        metadata.setPixelsSizeT(PositiveInteger(1), 0)

        metadata.setPixelsPhysicalSizeX(Length(1, units.MICROM), 0)
        metadata.setPixelsPhysicalSizeY(Length(1, units.MICROM), 0)
        metadata.setPixelsPhysicalSizeZ(Length(1, units.MICROM), 0)

        metadata.setPixelsID(ac_id, 0)
        metadata.setPixelsType(PixelType.FLOAT, 0)
        metadata.setPixelsInterleaved(False, 0)

        # metadata.setTiffDataFirstC(NonNegativeInteger(0), 0, 0)
        # metadata.setTiffDataFirstZ(NonNegativeInteger(0), 0, 0)
        # metadata.setTiffDataFirstT(NonNegativeInteger(0), 0, 0)
        print(c)
        for i in range(c):
            metadata.setChannelSamplesPerPixel(PositiveInteger(1), 0, i)
        for cnr, metal, label in zip(range(c), imc_acquisition.channel_metals,
                                     imc_acquisition.channel_labels):
            metadata.setChannelFluor(metal, 0, cnr)
            metadata.setChannelName(label, 0, cnr)
            metadata.setChannelID(label, 0, cnr)

        return metadata
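The metadata store returned above is typically handed to a Bio-Formats writer. A minimal sketch, assuming metadata and filename are the objects built in generate_ome_fromimc() and plane_bytes holds the pixel data of one plane as a byte array:

from loci.formats import ImageWriter

writer = ImageWriter()
writer.setMetadataRetrieve(metadata)
writer.setId(filename)
writer.saveBytes(0, plane_bytes)  # plane index 0; repeat for the remaining planes
writer.close()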
def nucleus_detection(infile, nucleus_channel, stacksize, animation):
    # Detect nuclei with 3D LoG (Laplacian of Gaussian) spot detection via TrackMate
    fullpath = infile
    infile = filename(infile)
    IJ.log("Start Segmentation " + str(infile))
    # First get Nb Stacks
    reader = ImageReader()
    omeMeta = MetadataTools.createOMEXMLMetadata()
    reader.setMetadataStore(omeMeta)
    reader.setId(fullpath)
    default_options = "stack_order=XYCZT color_mode=Composite view=Hyperstack specify_range c_begin=" + \
        str(nucleus_channel) + " c_end=" + str(nucleus_channel) + \
        " c_step=1 open=[" + fullpath + "]"
    NbStack = reader.getSizeZ()
    reader.close()
    output = re.sub('.ids', '.csv', infile)
    with open(os.path.join(folder5, output), 'wb') as outfile:
        DETECTwriter = csv.writer(outfile, delimiter=',')
        DETECTwriter.writerow(
            ['spotID', 'roundID', 'X', 'Y', 'Z', 'QUALITY', 'SNR', 'INTENSITY'])
    rounds = NbStack // stacksize
    spotID = 1
    for roundid in xrange(1, rounds + 2):
        # Process stacksize by stacksize otherwise crash because too many spots
        Zstart = (stacksize * roundid - stacksize + 1)
        Zend = (stacksize * roundid)
        if(Zend > NbStack):
            Zend = NbStack % stacksize + (roundid - 1) * stacksize
        IJ.log("Round:" + str(roundid) + ' Zstart=' + str(Zstart) +
               ' Zend=' + str(Zend) + ' out of ' + str(NbStack))
        IJ.run("Bio-Formats Importer", default_options + " z_begin=" +
               str(Zstart) + " z_end=" + str(Zend) + " z_step=1")
        imp = IJ.getImage()
        imp.show()
        cal = imp.getCalibration()
        model = Model()
        settings = Settings()
        settings.setFrom(imp)
        # Configure detector - Manually determined as best
        settings.detectorFactory = LogDetectorFactory()
        settings.detectorSettings = {
            'DO_SUBPIXEL_LOCALIZATION': True,
            'RADIUS': 5.5,
            'TARGET_CHANNEL': 1,
            'THRESHOLD': 50.0,
            'DO_MEDIAN_FILTERING': False,
        }
        filter1 = FeatureFilter('QUALITY', 1, True)
        settings.addSpotFilter(filter1)
        settings.addSpotAnalyzerFactory(SpotIntensityAnalyzerFactory())
        settings.addSpotAnalyzerFactory(SpotContrastAndSNRAnalyzerFactory())
        settings.trackerFactory = SparseLAPTrackerFactory()
        settings.trackerSettings = LAPUtils.getDefaultLAPSettingsMap()

        trackmate = TrackMate(model, settings)
        ok = trackmate.checkInput()
        if not ok:
            sys.exit(str(trackmate.getErrorMessage()))
        try:
            ok = trackmate.process()
        except:
            IJ.log("Nothing detected, Round:" + str(roundid) + ' Zstart=' +
                   str(Zstart) + ' Zend=' + str(Zend) + ' out of ' + str(NbStack))
            IJ.selectWindow(infile)
            IJ.run('Close')
            continue
        else:
            if animation:
                # For plotting purpose only
                imp.setPosition(1, 1, imp.getNFrames())
                imp.getProcessor().setMinAndMax(0, 4000)
                selectionModel = SelectionModel(model)
                displayer = HyperStackDisplayer(model, selectionModel, imp)
                displayer.render()
                displayer.refresh()
                for i in xrange(1, imp.getNSlices() + 1):
                    imp.setSlice(i)
                    time.sleep(0.05)
            IJ.selectWindow(infile)
            IJ.run('Close')
            spots = model.getSpots()
            spotIt = spots.iterator(0, False)
            sid = []
            sroundid = []
            x = []
            y = []
            z = []
            q = []
            snr = []
            intensity = []
            for spot in spotIt:
                sid.append(spotID)
                spotID = spotID + 1
                sroundid.append(roundid)
                x.append(spot.getFeature('POSITION_X'))
                y.append(spot.getFeature('POSITION_Y'))
                q.append(spot.getFeature('QUALITY'))
                snr.append(spot.getFeature('SNR'))
                intensity.append(spot.getFeature('MEAN_INTENSITY'))
                # Correct Z position
                correct_z = spot.getFeature(
                    'POSITION_Z') + (roundid - 1) * float(stacksize) * cal.pixelDepth
                z.append(correct_z)
            with open(os.path.join(folder5, output), 'ab') as outfile:
                DETECTwriter = csv.writer(outfile, delimiter=',')
                Sdata = zip(sid, sroundid, x, y, z, q, snr, intensity)
                for Srow in Sdata:
                    DETECTwriter.writerow(Srow)
Example no. 14
def readczi(imagefile,
            stitchtiles=True,
            setflatres=False,
            readpylevel=0,
            setconcat=True,
            openallseries=True,
            showomexml=False,
            attach=False,
            autoscale=True):

    log.info('Filename : ' + imagefile)

    metainfo = {}
    # check the file extension
    metainfo['Extension'] = MiscTools.getextension(MiscTools.splitext_recurse(imagefile))
    log.info('Detected File Extension : ' + metainfo['Extension'])

    # initialize the reader and get the OME metadata
    reader = ImageReader()
    omeMeta = MetadataTools.createOMEXMLMetadata()
    #metainfo['ImageCount_OME'] = omeMeta.getImageCount()
    reader.setMetadataStore(omeMeta)
    reader.setId(imagefile)
    metainfo['SeriesCount_BF'] = reader.getSeriesCount()
    reader.close()

    # get the scaling for XYZ
    physSizeX = omeMeta.getPixelsPhysicalSizeX(0)
    physSizeY = omeMeta.getPixelsPhysicalSizeY(0)
    physSizeZ = omeMeta.getPixelsPhysicalSizeZ(0)

    if physSizeX is not None:
        metainfo['ScaleX'] = round(physSizeX.value(), 3)
        metainfo['ScaleY'] = round(physSizeY.value(), 3)
    else:
        metainfo['ScaleX'] = None
        metainfo['ScaleY'] = None

    if physSizeZ is not None:
        metainfo['ScaleZ'] = round(physSizeZ.value(), 3)
    else:
        metainfo['ScaleZ'] = None

    options = DynamicMetadataOptions()
    options.setBoolean("zeissczi.autostitch", stitchtiles)
    options.setBoolean("zeissczi.attachments", attach)

    czireader = ZeissCZIReader()
    czireader.setFlattenedResolutions(setflatres)
    czireader.setMetadataOptions(options)
    czireader.setId(imagefile)

    # Set the preferences in the ImageJ plugin
    # Note although these preferences are applied, they are not refreshed in the UI
    Prefs.set("bioformats.zeissczi.allow.autostitch", str(stitchtiles).lower())
    Prefs.set("bioformats.zeissczi.include.attachments", str(attach).lower())

    # metainfo = {}
    metainfo['rescount'] = czireader.getResolutionCount()
    metainfo['SeriesCount_CZI'] = czireader.getSeriesCount()
    #metainfo['flatres'] = czireader.hasFlattenedResolutions()
    #metainfo['getreslevel'] = czireader.getResolution()

    # Dimensions
    metainfo['SizeT'] = czireader.getSizeT()
    metainfo['SizeZ'] = czireader.getSizeZ()
    metainfo['SizeC'] = czireader.getSizeC()
    metainfo['SizeX'] = czireader.getSizeX()
    metainfo['SizeY'] = czireader.getSizeY()

    # check for autostitching and possibility to read attachment
    metainfo['AllowAutoStitching'] = czireader.allowAutostitching()
    metainfo['CanReadAttachments'] = czireader.canReadAttachments()

    # read in and display ImagePlus(es) with arguments
    options = ImporterOptions()
    options.setOpenAllSeries(openallseries)
    options.setShowOMEXML(showomexml)
    options.setConcatenate(setconcat)
    options.setAutoscale(autoscale)
    options.setId(imagefile)

    # open the ImgPlus
    imps = BF.openImagePlus(options)
    metainfo['Pyramid Level Output'] = readpylevel + 1

    try:
        imp = imps[readpylevel]
        pylevelout = metainfo['SeriesCount_CZI']
    except:
        # fallback option
        log.info('PyLevel=' + str(readpylevel) + ' does not exist.')
        log.info('Using Pyramid Level = 0 as fallback.')
        imp = imps[0]
        pylevelout = 0
        metainfo['Pyramid Level Output'] = pylevelout

    # get the stack and some info
    imgstack = imp.getImageStack()
    metainfo['Output Slices'] = imgstack.getSize()
    metainfo['Output SizeX'] = imgstack.getWidth()
    metainfo['Output SizeY'] = imgstack.getHeight()

    # calc scaling in case of pyramid
    scale = float(metainfo['SizeX']) / float(metainfo['Output SizeX'])
    metainfo['Pyramid Scale Factor'] = scale
    metainfo['ScaleX Output'] = metainfo['ScaleX'] * scale
    metainfo['ScaleY Output'] = metainfo['ScaleY'] * scale

    # set the correct scaling
    imp = MiscTools.setscale(imp, scaleX=metainfo['ScaleX Output'],
                             scaleY=metainfo['ScaleY Output'],
                             scaleZ=metainfo['ScaleZ'],
                             unit="micron")

    # close czireader
    czireader.close()

    return imp, metainfo
Example no. 15
def getCZIinfo(imagefile, showimage=False, setreslevel=0, setflat2=False, openallseries=True, showomexml=False,setconcat=False,filepath1="./"):
    options = DynamicMetadataOptions()
    options.setBoolean("zeissczi.attachments", False)
    czireader = ZeissCZIReader()
    czireader.setFlattenedResolutions(setflat2)
    czireader.setMetadataOptions(options)
    czireader.setId(imagefile)
    seriesCount = czireader.getSeriesCount()
    # get the first occurrence of each pyramid stack
    location = list()
    for i in range(0, int(seriesCount) - 2):
        location.append(czireader.coreIndexToSeries(i))
    # log.info(location)
    c = 0
    loc2 = list()
    for i, v in enumerate(location):
        if i == 0:
            loc2.append(i)
        elif i > 0 and v != c:
            loc2.append(i)
            c = v
    log.info(str(loc2))
    # get OME data
    omeMeta = MetadataTools.createOMEXMLMetadata()
    # Set the preferences in the ImageJ plugin
    Prefs.set("bioformats.zeissczi.include.attachments", str(True).lower())
    if showimage:

        # read in and display ImagePlus(es) with arguments
        options = ImporterOptions()
        options.setOpenAllSeries(openallseries)
        options.setShowOMEXML(showomexml)
        options.setConcatenate(setconcat)
        options.setId(imagefile)

        # open the ImgPlus
        imps = BF.openImagePlus(options)
        name_list=imagefile.split('/')
        name=name_list[len(name_list)-1]
        out_path=filepath1  + "/"+name+"_Preview.tif"
        log.info(name)
        imp=getImageSeries(imps, seriesCount-1)
        imp.show()
        IJ.run("RGB Color")
        imp.close()
        IJ.saveAs("tiff", out_path)
        IJ.run("Close")
        out_path=filepath1  + "/"+name+"_Label.tif"
        imp=getImageSeries(imps, (seriesCount-2))
        imp.show()
        IJ.run("RGB Color")
        imp.close()
        IJ.saveAs("tiff", out_path)
        IJ.run("Close")
        c=1
        for series in loc2:
        	out_path=filepath1  + "/"+name+"Scene_" + str(c) + ".tif"
        	imp=getImageSeries(imps, series)
        	imp.show()
        	IJ.run("RGB Color")
        	imp.close()
        	IJ.saveAs("tiff", out_path)
        	IJ.run("Close")
        	c+=1
    czireader.close()
Example no. 16
    def openfile(imagefile,
                 stitchtiles=True,
                 setflatres=False,
                 readpylevel=0,
                 setconcat=True,
                 openallseries=True,
                 showomexml=False,
                 attach=False,
                 autoscale=True,
                 imageID=0):

        # stitchtiles = option of CZIReader to return the raw tiles as
        # individual series rather than the auto-stitched images

        metainfo = {}
        # check the file extension
        metainfo['Extension'] = MiscTools.getextension(MiscTools.splitext_recurse(imagefile))

        # initialize the reader and get the OME metadata
        reader = ImageReader()
        omeMeta = MetadataTools.createOMEXMLMetadata()
        reader.setMetadataStore(omeMeta)
        reader.setId(imagefile)
        # the OME store is only populated during setId(), so query counts afterwards
        metainfo['ImageCount_OME'] = omeMeta.getImageCount()
        metainfo['SeriesCount_BF'] = reader.getSeriesCount()
        reader.close()

        # read dimensions TZCXY from OME metadata
        metainfo['SizeT'] = omeMeta.getPixelsSizeT(imageID).getValue()
        metainfo['SizeZ'] = omeMeta.getPixelsSizeZ(imageID).getValue()
        metainfo['SizeC'] = omeMeta.getPixelsSizeC(imageID).getValue()
        metainfo['SizeX'] = omeMeta.getPixelsSizeX(imageID).getValue()
        metainfo['SizeY'] = omeMeta.getPixelsSizeY(imageID).getValue()

        # store info about stack
        if metainfo['SizeZ'] == 1:
            metainfo['is3d'] = False
        elif metainfo['SizeZ'] > 1:
            metainfo['is3d'] = True

        # get the scaling for XYZ
        physSizeX = omeMeta.getPixelsPhysicalSizeX(0)
        physSizeY = omeMeta.getPixelsPhysicalSizeY(0)
        physSizeZ = omeMeta.getPixelsPhysicalSizeZ(0)

        if physSizeX is not None:
            metainfo['ScaleX'] = round(physSizeX.value(), 3)
            metainfo['ScaleY'] = round(physSizeY.value(), 3)
        else:
            metainfo['ScaleX'] = None
            metainfo['ScaleY'] = None

        if physSizeZ is not None:
            metainfo['ScaleZ'] = round(physSizeZ.value(), 3)
        else:
            metainfo['ScaleZ'] = None

        # if image file is Carl Zeiss Image - CZI
        if metainfo['Extension'] == '.czi':

            # read the CZI file using the CZIReader
            # pylevel = 0 - read the full resolution image

            imp, metainfo = ImportTools.readCZI(imagefile, metainfo,
                                                stitchtiles=stitchtiles,
                                                setflatres=setflatres,
                                                readpylevel=readpylevel,
                                                setconcat=setconcat,
                                                openallseries=openallseries,
                                                showomexml=showomexml,
                                                attach=attach,
                                                autoscale=autoscale)

        # if image file is not Carl Zeiss Image - CZI
        if metainfo['Extension'] != '.czi':

            # read the imagefile using the correct method
            if metainfo['Extension'].lower() in ('.jpg', '.jpeg'):
                # use dedicated method for jpg
                imp, metainfo = ImageTools.openjpg(imagefile, method='IJ')
            else:
                # if not jpg - use BioFormats
                imp, metainfo = ImportTools.readbf(imagefile, metainfo,
                                                   setflatres=setflatres,
                                                   readpylevel=readpylevel,
                                                   setconcat=setconcat,
                                                   openallseries=openallseries,
                                                   showomexml=showomexml,
                                                   autoscale=autoscale)

        return imp, metainfo
Example no. 17
def check_fusion_settings(czi_path):
    """Check for fusion settings and asks confirmation to user if H5/XML fusion

    Parameters
    ----------
    czi_path : str
        Path to the CZI file

    Returns
    -------
    bool
        Bool for fusion
    str
        Method of RAM handling
    bool
        Bool for TIFF or H5/XML fusion
    """

    # Default values
    do_fusion = True
    fuse_tiff = True

    reader = ZeissCZIReader()
    m = DynamicMetadataOptions()
    m.setBoolean(ZeissCZIReader.ALLOW_AUTOSTITCHING_KEY, False)
    m.setBoolean(ZeissCZIReader.RELATIVE_POSITIONS_KEY, True)
    reader.setMetadataOptions(m)
    omeMeta = MetadataTools.createOMEXMLMetadata()
    reader.setMetadataStore(omeMeta)
    reader.setId(str(czi_path))

    nbr_tp = omeMeta.getTimestampAnnotationCount() + 1
    nbr_chnl = omeMeta.getChannelCount(0)
    reader.close()

    # check the file size of the file to be fused and compare to the available RAM
    # h5_filesize = os.path.getsize(export_path_temp + ".h5")
    h5_filesize = os.path.getsize(czi_path) / 2
    free_memory = get_free_memory()

    print("h5 filesize " + convert_bytes(h5_filesize))
    print("free memory in ij " + convert_bytes(free_memory))

    # TODO: include in below calculation t_end, since only one t is fused at a time.
    if free_memory > (6 * h5_filesize / downsampling):
        ram_handling = "[Precompute Image]"
    else:
        ram_handling = "Virtual"
    print("fusion mode used " + str(ram_handling))

    # if autoselect_illuminations and nbr_ill > 1:
    #     ill_value = 2
    # else:
    #     ill_value = 1

    ram_requirement = 2 * h5_filesize / (nbr_tp * nbr_chnl * downsampling)
    print(ram_requirement)
    sufficient_ram = ram_requirement < free_memory / 10

    if not sufficient_ram:
        try:
            yn = YesNoCancelDialog(
                IJ.getInstance(),
                "Warning!",
                (
                    "File size is too big to use TIFF for fusion\n"
                    "Fusion will happen using H5/XML which might take weeks. Are you "
                    "sure you want to do fusion ?\n"
                    "All steps prior to fusion would still happen, allowing for manual "
                    "fusion and tile selection."
                ),
            )
            if yn.yesPressed():
                fuse_tiff = False
            else:
                do_fusion = False
        except Exception:
            # when running headless the above will raise a java.awt.HeadlessException,
            # so we simply fall back to the same behavior as if "No" was clicked:
            do_fusion = False
    return do_fusion, ram_handling, fuse_tiff
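check_fusion_settings() calls get_free_memory(), convert_bytes() and a global downsampling factor that are defined elsewhere in the original script. A minimal sketch of what the two helpers could look like, using only standard ImageJ calls (both implementations are assumptions, not the original code):

from ij import IJ

def get_free_memory():
    # free heap = maximum heap configured for ImageJ minus the memory currently in use
    return IJ.maxMemory() - IJ.currentMemory()

def convert_bytes(num):
    # human-readable byte size, e.g. 1536 -> 1.5 KB
    for unit in ['bytes', 'KB', 'MB', 'GB', 'TB']:
        if num < 1024.0:
            return '%3.1f %s' % (num, unit)
        num = num / 1024.0
    return '%3.1f PB' % num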
Example no. 18
def run():
    t_start = datetime.now()
    image_paths = glob(os.path.join(str(import_dir.getPath()), '*tif'))

    print '\tread image metadata'
    reader = ImageReader()
    in_meta = MetadataTools.createOMEXMLMetadata()
    reader.setMetadataStore(in_meta)

    x_dims = []
    y_dims = []
    z_dims = []
    c_dims = []
    t_dims = []
    eff = []
    spp = []

    for image_path in image_paths:
        print '\t  parse %s' % (image_path)
        reader.setId(image_path)
        x_dims.append(reader.getSizeX())
        y_dims.append(reader.getSizeY())
        z_dims.append(reader.getSizeZ())
        c_dims.append(reader.getSizeC())
        t_dims.append(reader.getSizeT())
        eff.append(reader.imageCount / z_dims[-1] / t_dims[-1])
        spp.append(reader.getSizeC() / eff[-1])

    format = FormatTools.getPixelTypeString(reader.getPixelType())
    series = reader.getSeries()
    big_endian = Boolean.FALSE
    order = reader.getDimensionOrder()
    reader.close()

    # Compute the dimensions of the output file
    x_dim = max(x_dims)
    y_dim = max(y_dims)
    z_dim = max(z_dims)
    c_dim = max(c_dims)
    t_dim = max(t_dims)

    print '\t  series: %i' % series
    print '\t  format: %s' % format
    print '\t  dimension order: %s' % order
    print '\t  x: %s -> %i' % (x_dims, x_dim)
    print '\t  y: %s -> %i' % (y_dims, y_dim)
    print '\t  z: %s -> %i' % (z_dims, z_dim)
    print '\t  c: %s -> %i' % (c_dims, c_dim)
    print '\t  t: %s -> %i' % (t_dims, t_dim)
    print '\t  effective size c: %s' % eff
    print '\t  samples per pixel: %s' % spp

    # Get the time dimension from the number of input files
    t_dim = len(image_paths)

    # TODO: Tried to work out the order with Axes class, got something weird though.
    dimensions = [Short(x_dim), Short(y_dim), Short(c_dim), Short(z_dim)]

    pixels_per_plane = x_dim * y_dim

    # Assemble the metadata for the output file
    out_meta = MetadataTools.createOMEXMLMetadata()
    out_meta.setImageID(MetadataTools.createLSID('Image', series), series)
    out_meta.setPixelsID(MetadataTools.createLSID('Pixels', series), series)
    out_meta.setPixelsBinDataBigEndian(Boolean.TRUE, 0, 0)
    out_meta.setPixelsDimensionOrder(DimensionOrder.fromString(order), series)
    out_meta.setPixelsType(PixelType.fromString(format), series)
    out_meta.setPixelsSizeX(PositiveInteger(x_dim), series)
    out_meta.setPixelsSizeY(PositiveInteger(y_dim), series)
    out_meta.setPixelsSizeZ(PositiveInteger(z_dim), series)
    out_meta.setPixelsSizeC(PositiveInteger(c_dim), series)
    out_meta.setPixelsSizeT(PositiveInteger(t_dim), series)

    for c in range(c_dim):
        out_meta.setChannelID(MetadataTools.createLSID('Channel', series, c),
                              series, c)
        out_meta.setChannelSamplesPerPixel(PositiveInteger(1), series, c)

    # Initialize the BF writer
    result_path = os.path.join(result_dir.getPath(), result_name)
    writer = ImageWriter()
    writer.setMetadataRetrieve(out_meta)
    writer.setId(result_path)
    print '\tcreated to %s' % (result_path)

    # Write the stacks into the output file
    N = len(image_paths)
    for i, image_path in enumerate(image_paths):
        status.showStatus(i, N, "catenating %i of %i time-points" % (i, N))
        print '\t  processing %s' % (image_path)
        ds = io.open(image_path)
        xi = ds.dimensionIndex(Axes.X)
        xv = ds.dimension(xi)
        yi = ds.dimensionIndex(Axes.Y)
        yv = ds.dimension(yi)
        zi = ds.dimensionIndex(Axes.Z)
        zv = ds.dimension(zi)
        ti = ds.dimensionIndex(Axes.TIME)
        tv = ds.dimension(ti)
        ci = ds.dimensionIndex(Axes.CHANNEL)
        cv = ds.dimension(ci)

        dx = float(x_dim - xv) / 2.0
        dy = float(y_dim - yv) / 2.0
        dz = float(z_dim - zv) / 2.0
        print '\t     translation vector (dx, dy, dz) = (%f, %f, %f)' % (
            dx, dy, dz)

        if (dx != 0) or (dy != 0) or (dz != 0):
            stk = Views.translate(ds, long(dx), long(dy), long(0), long(dz))
            stk = Views.extendZero(stk)
        else:
            stk = Views.extendZero(ds.getImgPlus().getImg())

        print '\t     writing planes ',
        n = 0
        plane = 1
        byte_array = []
        interval_view = Views.interval(stk, \
                                       [Long(0), Long(0), Long(0), Long(0)], \
                                       [Long(x_dim - 1), Long(y_dim - 1), Long(c_dim - 1), Long(z_dim - 1)])
        cursor = interval_view.cursor()
        while cursor.hasNext():
            n += 1
            cursor.fwd()
            value = cursor.get().getInteger()
            bytes = DataTools.shortToBytes(value, big_endian)
            byte_array.extend(bytes)

            if n == pixels_per_plane:
                writer.saveBytes(plane - 1, byte_array)

                print '.',
                if ((plane) % 10) == 0:
                    print '\n\t                    ',

                byte_array = []
                plane += 1
                n = 0

        print ' '

    writer.close()
    t = datetime.now() - t_start
    print '\twrote %i planes to %s in %i sec.' % (plane - 1, result_path,
                                                  t.total_seconds())
    print '... done.'
def processFile():
	# start logging
	IJ.log("\n______________________________\n\n\t\tOlympus DM correction\n\t\tVersion " + pluginVersion +"\n______________________________\n")

	# ask user for file
	ofd = OpenDialog("Choose a file", None)  
	filename = ofd.getFileName()  
  
	if filename is None:
		IJ.log("User canceled the dialog!\nImage processing canceled!\n")
		return

	directory = ofd.getDirectory()
	filepath = directory + filename
	IJ.log("File path: " + filepath)

	if not filename.endswith(".oir"):
		IJ.log("Not an Olympus (.oir) file.\nNo image to process.\n")
		return

	filenameExExt = os.path.splitext(filename)[0]
      
	# parse metadata
	reader = ImageReader()
	omeMeta = MetadataTools.createOMEXMLMetadata()
	reader.setMetadataStore(omeMeta)
	reader.setId(filepath)
	numChannels = reader.getSizeC()
	numSlices = reader.getSizeZ()
	numFrames = reader.getSizeT()
	seriesCount = reader.getSeriesCount()

	globalMetadata = reader.getGlobalMetadata()
	seriesMetadata = reader.getSeriesMetadata()

	objLensName = globalMetadata['- Objective Lens name #1']

	areaRotation = float(seriesMetadata['area rotation #1'])
	acquisitionValueRotation = float(seriesMetadata['acquisitionValue rotation #1'])
	if 'regionInfo rotation #1' in seriesMetadata:
		regionInfoRotation = float(seriesMetadata['regionInfo rotation #1'])
	else:
		regionInfoRotation = float(0)

	totalRotation = areaRotation + regionInfoRotation
	physSizeX = omeMeta.getPixelsPhysicalSizeX(0)
	physSizeY = omeMeta.getPixelsPhysicalSizeY(0)
	pxSizeX = physSizeX.value(UNITS.MICROM)
	pxSizeY = physSizeY.value(UNITS.MICROM)

	# log metadata
	IJ.log("\nMETADATA")
	#IJ.log("Filename: " + filepath)
	IJ.log("Number of series: " + str(seriesCount))
	IJ.log("Number of channels: " + str(numChannels))
	IJ.log("Number of frames: " + str(numFrames))
	IJ.log("Number of slices: " + str(numSlices))
	IJ.log("Objective lens: " + objLensName)
	IJ.log("FOV rotation: " + str(areaRotation))
	IJ.log("ROI rotation: " + str(regionInfoRotation))
	IJ.log("Total rotation: " + str(totalRotation))
	IJ.log("Pixel size:")
	IJ.log("\t\tX = " + str(physSizeX.value()) + " " + physSizeX.unit().getSymbol())
	IJ.log("\t\tY = " + str(physSizeY.value()) + " " + physSizeY.unit().getSymbol())

	# ask user to identify dichroic mirror used for each channel  
	gdDM = GenericDialog("Dichroic mirrors")
	DMs = ["DM1", "DM2", "DM3", "DM4", "DM5"] 
	for i in range(numChannels):
		gdDM.addChoice("Channel " + str(i+1), DMs, DMs[0])
	gdDM.addCheckbox("Merge channels", False) 
	gdDM.showDialog()
	if gdDM.wasCanceled():
		IJ.log("User canceled the dialog!\nImage processing canceled!\n")
		return
	dichroics = []
	for i in range(numChannels):
		dichroics.append(gdDM.getNextChoice())
	merge = gdDM.getNextBoolean()
	IJ.log("\nUser selected dichroic mirrors")
	for i in range(numChannels):
		IJ.log("\t\tChannel " + str(i+1) + ": " + dichroics[i])	

	if merge:
		channels = []
		chDict = {}
		for i in range(numChannels):
			chName = "Channel"+str(i+1)
			channels.append(chName)
			chDict[chName] = i
		channels.append("NONE")
		colourChoices = ["red", "green", "blue", "gray", "cyan", "magenta", "yellow"]
		gdMerge = GenericDialog("Merge channels")
		for c in colourChoices:
			gdMerge.addChoice(c + ":", channels, channels[numChannels])
		gdMerge.showDialog()
		if gdMerge.wasCanceled():
			IJ.log("User canceled the dialog!\nImage processing canceled!\n")
			return
		IJ.log("\nUser selected channel colours")
		mergeList = []
		for i in range(len(colourChoices)):
			ch = gdMerge.getNextChoice()
			if ch == "NONE":
				mergeList.append(None)
			else:
				mergeList.append(chDict[ch])
				IJ.log("\t\t" + colourChoices[i] + ": " + ch)

	# ask user for an output directory
	dc = DirectoryChooser("Choose folder for output")  
	od = dc.getDirectory()    
	if od is None:  
		IJ.log("User canceled the dialog!\nImage processing canceled!\n")  
		return  
  
	if merge:
		tifDir = od + "." + str(datetime.now()).replace(" ", "").replace(":", "") + "/"
		if not os.path.exists(tifDir):
			os.makedirs(tifDir)
			IJ.log("\nCreated temporary folder: " + tifDir + "\n")
		else:
			IJ.log("Unable to create temporary folder!\n")
	else:
		tifDir = od + filenameExExt + "/"
		if not os.path.exists(tifDir):
			os.makedirs(tifDir)
			IJ.log("\nCreated subfolder: " + tifDir + "\n")
		else:
			IJ.log("\nSubfolder " + tifDir +  " already exists")

	# correct images
	tifFilePaths = []
	for i in range(numChannels):
		ip = extractChannel(oirFile=filepath, ch=i)
		if dichroics[i] == "DM1":
			IJ.log("Channel " + str(i+1) + " was imaged using DM1, so no correction required.")
		else:
			offsets = getOffset(obj=objLensName,dm=dichroicDict[dichroics[i]])
			xom = offsets['x']
			yom = offsets['y']
			if abs(totalRotation) > 0.1:
				rotOff = rotateOffset(x=xom, y=yom, angle=-totalRotation)
				xom = rotOff['x']
				yom = rotOff['y']
			xop = int(round(xom/pxSizeX))
			yop = int(round(yom/pxSizeY))
			IJ.log("Channel " + str(i+1) + " offsets")
			IJ.log("\t\tMicrometres")
			IJ.log("\t\t\t\tx = " + str(xom))
			IJ.log("\t\t\t\ty = " + str(yom))
			IJ.log("\t\tPixels")
			IJ.log("\t\t\t\tx = " + str(xop))
			IJ.log("\t\t\t\ty = " + str(yop))
			IJ.run(ip, "Translate...", "x=" + str(-xop) + " y=" + str(-yop) + " interpolation=None stack")

		tifFilePath = tifDir + filenameExExt + "_ch_"+str(i+1)+".tif"
		tifFilePaths.append(tifFilePath)
		if os.path.exists(tifFilePath):
			IJ.log("\nOutput file exists: " + tifFilePath)
			IJ.log("Rerun plugin choosing a different output folder")
			IJ.log("or delete file and then rerun plugin.")
			IJ.log("Image processing terminated!\n")
			return
		FileSaver(ip).saveAsTiff(tifFilePath)

	if merge:
		for i in range(len(mergeList)):
			if mergeList[i] != None:
				mergeList[i] = readSingleChannelImg(tifFilePaths[mergeList[i]])
		merged = RGBStackMerge.mergeChannels(mergeList, False)
		mergedChannelFilepath = od + filenameExExt + ".tif"
		if os.path.exists(mergedChannelFilepath):
			IJ.log("\nOutput file exists: " + mergedChannelFilepath)
			IJ.log("Rerun plugin choosing a different output folder")
			IJ.log("or delete file and then rerun plugin.")
			IJ.log("Image processing terminated!\n")
		FileSaver(merged).saveAsTiff(mergedChannelFilepath)
		for tf in tifFilePaths:
			os.remove(tf)
		os.rmdir(tifDir)
			
	IJ.log("\nFinished processing file:\n" + filepath + "\n")
	if merge:
		IJ.log("Image file with channels aligned:\n" + od + filenameExExt + ".tif\n")
	else:
		IJ.log("Aligned images (one tiff file for each channel) can be found in:\n" + tifDir + "\n")
def initreader(vsi_path):
    # Initialize a Bio-Formats reader for a .vsi file with an attached OME metadata store
    reader = ImageReader()
    omeMeta = MetadataTools.createOMEXMLMetadata()
    reader.setMetadataStore(omeMeta)
    reader.setId(vsi_path)
    return reader
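A usage sketch for initreader() (the path is only an example); the metadata store attached in initreader() can be retrieved from the reader again, and the reader should be closed when done:

reader = initreader('/path/to/slide.vsi')
ome = reader.getMetadataStore()
print("series: %d, width: %d px" % (reader.getSeriesCount(), ome.getPixelsSizeX(0).getValue()))
reader.close()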
Example no. 21
from net.imagej.axis import Axes
from jarray import array
from net.imglib2.type.numeric.integer import UnsignedByteType
import net.imglib2.type.logic.BitType
import net.imglib2.algorithm.neighborhood.HyperSphereShape
from net.imglib2.type.numeric.real import FloatType,DoubleType
from ij.measure import ResultsTable
from net.imagej.ops import Ops
from loci.plugins.in import ImporterOptions
options = ImporterOptions()
options.setId(Input_File.getAbsolutePath())
from loci.formats import ImageReader
from loci.formats import MetadataTools
#get import ready and import
reader = ImageReader()
omeMeta = MetadataTools.createOMEXMLMetadata()
reader.setMetadataStore(omeMeta)
reader.setId(Input_File.getAbsolutePath())
seriesCount = reader.getSeriesCount()
reader.close()
#open image
imp, = BF.openImagePlus(options)
#get output path variable
outdir=Output_File.getAbsolutePath()
#get input path variable
inpu=Input_File.getAbsolutePath()
#convert to RGB
IC(imp).convertToRGB()
#show image
imp.show()
#Define ROI of whole image (basically)
Example no. 22
def run_script(params):

	input_dir = params['directory']
	gridX = params['gridX']
	gridY = params['gridY']
	select_channel = params['select_channel']
	channel = params['channel']

	input_data = glob.glob("%s*.tiff"%input_dir)
	first = [s for s in input_data if "T0_C0" in s][0]
	start = first.index("Z")+1
	sub = first[start:]
	stop = sub.index("_")
	digits = sub[:stop-1]
	sep = os.path.sep
			
	original_metadata = []
	for filename in input_data:
		meta = MetadataTools.createOMEXMLMetadata()
		reader = get_reader(filename,meta)
		original_metadata.append(meta)
		reader.close()

	complete_meta = original_metadata[0]
	channels_meta = channel_info(complete_meta)
	if len(input_data) != (gridX * gridY * len(channels_meta)):
		IJ.log("Stopped stitching - gridX or gridY not set correctly")
		return
	
	channels = channels_meta
	if select_channel:
		channels = [channels_meta[channel]] # a list of len 1 with a dictionary of channel metadata
		
	num_tiles,num_slices = tile_info(complete_meta)
	if params['separate_z']:
		sizeZ = num_slices
	else:
		sizeZ = 1
		
	for z in range(sizeZ):
		for t in range(num_tiles):
			for c,chan in enumerate(channels):
				frag = "Z%s%s_T%s_C%s"%(digits,z,t,chan['ID'])
				input_path = [s for s in input_data if frag in s][0]
				IJ.log("Transforming metadata in image %s"%os.path.basename(input_path))
				tile_meta = MetadataTools.createOMEXMLMetadata()
				tile_meta = set_metadata(complete_meta,tile_meta,chan)
				replace_meta(tile_meta,input_path)

	idx = input_data[0].index("Z%s0_T0_C0.tiff"%digits)
	prefix = input_data[0][:idx]
	trunc_filenames = []
	for filename in input_data:
		new_filename = input_dir+filename[idx:]
		os.rename(filename,new_filename)
		trunc_filenames.append(new_filename)

	while not os.path.exists(trunc_filenames[-1]):
	   time.sleep(1)
		
	physX,physY,physZ = pixel_info(complete_meta)	
	for c,chan in enumerate(channels):
		tile_names = "Z%s0_T{i}_C%s.tiff"%(digits,chan['ID'])
		run_stitching(input_dir,tile_names,gridX,gridY)
		write_fused(input_dir,chan,num_slices,c+1,\
					physX,physY,physZ) # channel index starts at 1
					
	restore_metadata(input_dir,original_metadata,prefix)
	delete_slices(input_dir)
def processFile(filename, inDir, outDir, dichroics, mergeList):

	if mergeList is None:
		merge = False
	else:
		merge = True
	
	filenameExExt = os.path.splitext(filename)[0]
	filepath = inDir + filename
      
	# parse metadata
	reader = ImageReader()
	omeMeta = MetadataTools.createOMEXMLMetadata()
	reader.setMetadataStore(omeMeta)
	reader.setId(filepath)
	numChannels = reader.getSizeC()
	numSlices = reader.getSizeZ()
	numFrames = reader.getSizeT()
	seriesCount = reader.getSeriesCount()

	globalMetadata = reader.getGlobalMetadata()
	seriesMetadata = reader.getSeriesMetadata()

	objLensName = globalMetadata['- Objective Lens name #1']

	areaRotation = float(seriesMetadata['area rotation #1'])
	acquisitionValueRotation = float(seriesMetadata['acquisitionValue rotation #1'])
	if 'regionInfo rotation #1' in seriesMetadata:
		regionInfoRotation = float(seriesMetadata['regionInfo rotation #1'])
	else:
		regionInfoRotation = float(0)

	totalRotation = areaRotation + regionInfoRotation
	physSizeX = omeMeta.getPixelsPhysicalSizeX(0)
	physSizeY = omeMeta.getPixelsPhysicalSizeY(0)
	pxSizeX = physSizeX.value(UNITS.MICROM)
	pxSizeY = physSizeY.value(UNITS.MICROM)

	# log metadata
	IJ.log("\nMETADATA")
	#IJ.log("Filename: " + filepath)
	IJ.log("Number of series: " + str(seriesCount))
	IJ.log("Number of channels: " + str(numChannels))
	IJ.log("Number of frames: " + str(numFrames))
	IJ.log("Number of slices: " + str(numSlices))
	IJ.log("Objective lens: " + objLensName)
	IJ.log("FOV rotation: " + str(areaRotation))
	IJ.log("ROI rotation: " + str(regionInfoRotation))
	IJ.log("Total rotation: " + str(totalRotation))
	IJ.log("Pixel size:")
	IJ.log("\t\tX = " + str(physSizeX.value()) + " " + physSizeX.unit().getSymbol())
	IJ.log("\t\tY = " + str(physSizeY.value()) + " " + physSizeY.unit().getSymbol())
  
	if merge:
		tifDir = outDir + "." + str(datetime.now()).replace(" ", "").replace(":", "") + "/"
		if not os.path.exists(tifDir):
			os.makedirs(tifDir)
			IJ.log("\nCreated temporary folder: " + tifDir + "\n")
		else:
			IJ.log("Unable to create temporary folder!\n")
	else:
		tifDir = outDir + filenameExExt + "/"
		if not os.path.exists(tifDir):
			os.makedirs(tifDir)
			IJ.log("\nCreated subfolder: " + tifDir + "\n")
		else:
			IJ.log("\nSubfolder " + tifDir +  " already exists.\n")

	# correct images
	tifFilePaths = []
	for i in range(numChannels):
		ip = extractChannel(oirFile=filepath, ch=i)
		if dichroics[i] == "DM1":
			IJ.log("Channel " + str(i+1) + " was imaged using DM1, so no correction required.")
		else:
			offsets = getOffset(obj=objLensName,dm=dichroicDict[dichroics[i]])
			xom = offsets['x']
			yom = offsets['y']
			if abs(totalRotation) > 0.1:
				rotOff = rotateOffset(x=xom, y=yom, angle=-totalRotation)
				xom = rotOff['x']
				yom = rotOff['y']
			xop = int(round(xom/pxSizeX))
			yop = int(round(yom/pxSizeY))
			IJ.log("Channel " + str(i+1) + " offsets")
			IJ.log("\t\tMicrometres")
			IJ.log("\t\t\t\tx = " + str(xom))
			IJ.log("\t\t\t\ty = " + str(yom))
			IJ.log("\t\tPixels")
			IJ.log("\t\t\t\tx = " + str(xop))
			IJ.log("\t\t\t\ty = " + str(yop))
			IJ.run(ip, "Translate...", "x=" + str(-xop) + " y=" + str(-yop) + " interpolation=None stack")

		tifFilePath = tifDir + filenameExExt + "_ch_"+str(i+1)+".tif"
		tifFilePaths.append(tifFilePath)
		if os.path.exists(tifFilePath):
			IJ.log("\nOutput file exists: " + tifFilePath)
			IJ.log("Rerun plugin choosing a different output folder")
			IJ.log("or delete file and then rerun plugin.")
			IJ.log("Image processing terminated!\n")
			return
		FileSaver(ip).saveAsTiff(tifFilePath)

	if merge:
		max_list = []
		for i in range(len(mergeList)):
			if mergeList[i] != None:
				mergeList[i] = readSingleChannelImg(tifFilePaths[mergeList[i]])
				channel = mergeList[i]#https://python.hotexamples.com/examples/ij.plugin/RGBStackMerge/mergeChannels/python-rgbstackmerge-mergechannels-method-examples.html
				projector = ZProjector(channel)
				projector.setMethod(ZProjector.MAX_METHOD)
				projector.doProjection()
				max_list.append(projector.getProjection())
		merged = RGBStackMerge.mergeChannels(mergeList, False)
		merged_max = RGBStackMerge.mergeChannels(max_list, False)
		mergedChannelFilepath = outDir + filenameExExt + ".tif"
		maxMergedChannelFilepath = outDir + filenameExExt + "_max.tif"
		if os.path.exists(mergedChannelFilepath):
			IJ.log("\nOutput file exists: " + mergedChannelFilepath)
			IJ.log("Rerun plugin choosing a different output folder")
			IJ.log("or delete file and then rerun plugin.")
			IJ.log("Image processing terminated!\n")
		FileSaver(merged).saveAsTiff(mergedChannelFilepath)
		FileSaver(merged_max).saveAsTiff(maxMergedChannelFilepath)
		for tf in tifFilePaths:
			os.remove(tf)
		os.rmdir(tifDir)	

	IJ.log("\nFinished processing file:\n" + filepath + "\n")
	if merge:
		IJ.log("Image file with channels aligned:\n" + outDir + filenameExExt + ".tif\n")
	else:
		IJ.log("Aligned images (one tiff file for each channel) can be found in:\n" + tifDir + "\n")
def get_ome_metadata(source, imagenames):
    """Get the stage coordinates and calibration from the ome-xml for a given list of images

    Arguments:
        source {string} -- Path to the images
        imagenames {list} -- list of images filenames

    Returns:
        a tuple that contains:
        dimensions {int} -- number of dimensions (2D or 3D)
        stage_coordinates_x {list} -- the absolute stage x-coordinates from ome-xml metadata
        stage_coordinates_y {list} -- the absolute stage y-coordinates from ome-xml metadata
        stage_coordinates_z {list} -- the absolute stage z-coordinates from ome-xml metadata
        relative_coordinates_x_px {list} -- the relative stage x-coordinates in px
        relative_coordinates_y_px {list} -- the relative stage y-coordinates in px
        relative_coordinates_z_px {list} -- the relative stage z-coordinates in px
        image_calibration {list} -- x,y,z image calibration in unit/px
        calibration_unit {string} -- image calibration unit
        image_dimensions_czt {list} -- number of images in dimensions c,z,t
    """

    # lists to store the absolute stage coordinates from the metadata
    stage_coordinates_x = []
    stage_coordinates_y = []
    stage_coordinates_z = []

    for counter, image in enumerate(imagenames):

        # parse metadata
        reader = ImageReader()
        omeMeta = MetadataTools.createOMEXMLMetadata()
        reader.setMetadataStore(omeMeta)
        reader.setId(source + str(image))

        # get hyperstack dimensions from the first image
        if counter == 0:
            frame_size_x = reader.getSizeX()
            frame_size_y = reader.getSizeY()
            frame_size_z = reader.getSizeZ()
            frame_size_c = reader.getSizeC()
            frame_size_t = reader.getSizeT()

            # note the dimensions
            if frame_size_z == 1:
                dimensions = 2
            if frame_size_z > 1:
                dimensions = 3

            # get the physical calibration for the first image series
            physSizeX = omeMeta.getPixelsPhysicalSizeX(0)
            physSizeY = omeMeta.getPixelsPhysicalSizeY(0)
            physSizeZ = omeMeta.getPixelsPhysicalSizeZ(0)

            # workaround to get the z-interval if physSizeZ.value() returns None.
            z_interval = 1
            if physSizeZ is not None:
                z_interval = physSizeZ.value()

            if frame_size_z > 1 and physSizeZ is None:
                print "no z calibration found, trying to recover"
                first_plane = omeMeta.getPlanePositionZ(0, 0)
                next_plane_imagenumber = frame_size_c + frame_size_t - 1
                second_plane = omeMeta.getPlanePositionZ(
                    0, next_plane_imagenumber)
                z_interval = abs(
                    abs(first_plane.value()) - abs(second_plane.value()))
                print "z-interval seems to be: ", z_interval

            # create an image calibration
            image_calibration = [
                physSizeX.value(),
                physSizeY.value(), z_interval
            ]
            calibration_unit = physSizeX.unit().getSymbol()
            image_dimensions_czt = [frame_size_c, frame_size_z, frame_size_t]

        reader.close()

        # get the plane position in calibrated units
        current_position_x = omeMeta.getPlanePositionX(0, 0)
        current_position_y = omeMeta.getPlanePositionY(0, 0)
        current_position_z = omeMeta.getPlanePositionZ(0, 0)

        # get the absolute stage positions and store them
        pos_x = current_position_x.value()
        pos_y = current_position_y.value()

        if current_position_z is None:
            print "the z-position is missing in the ome-xml metadata."
            pos_z = 1.0
        else:
            pos_z = current_position_z.value()

        stage_coordinates_x.append(pos_x)
        stage_coordinates_y.append(pos_y)
        stage_coordinates_z.append(pos_z)

    # calculate and store the relative stage movements in px (for the Grid/Collection stitcher)
    relative_coordinates_x_px = []
    relative_coordinates_y_px = []
    relative_coordinates_z_px = []

    for i in range(len(stage_coordinates_x)):
        rel_pos_x = (stage_coordinates_x[i] -
                     stage_coordinates_x[0]) / physSizeX.value()
        rel_pos_y = (stage_coordinates_y[i] -
                     stage_coordinates_y[0]) / physSizeY.value()
        rel_pos_z = (stage_coordinates_z[i] -
                     stage_coordinates_z[0]) / z_interval

        relative_coordinates_x_px.append(rel_pos_x)
        relative_coordinates_y_px.append(rel_pos_y)
        relative_coordinates_z_px.append(rel_pos_z)

    return (dimensions, stage_coordinates_x, stage_coordinates_y,
            stage_coordinates_z, relative_coordinates_x_px,
            relative_coordinates_y_px, relative_coordinates_z_px,
            image_calibration, calibration_unit, image_dimensions_czt)
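
# A minimal usage sketch for get_ome_metadata(); the folder and file names below are
# hypothetical placeholders. The relative pixel coordinates are meant to be fed to the
# Grid/Collection stitching plugin, as noted in the docstring.
source = "/path/to/tiles/"
imagenames = ["tile_01.ome.tif", "tile_02.ome.tif"]
(dimensions, stage_x, stage_y, stage_z,
 rel_x_px, rel_y_px, rel_z_px,
 image_calibration, calibration_unit,
 image_dimensions_czt) = get_ome_metadata(source, imagenames)
print "dimensions:", dimensions
print "calibration (%s/px):" % calibration_unit, image_calibration
print "relative tile positions (px):", zip(rel_x_px, rel_y_px, rel_z_px)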
Example no. 25
def processMovie(root, files, outfile):
    """Concatenate images and write ome.tiff file.
    If the image already contains multiple time points, just copy the image"""

    files.sort()

    options = ImporterOptions()
    options.setId(files[0])
    options.setVirtual(1)

    image = BF.openImagePlus(options)
    image = image[0]
    if image.getNFrames() > 1:
        msg = ("%s Contains multiple time points. Can only concatenate"
               " single time points!" %files[0])
        image.close()
        raise RuntimeError(msg)

    reader = ImageReader()
    reader.setMetadataStore(MetadataTools.createOMEXMLMetadata())
    reader.setId(files[0])
    timeInfo = []
    omeOut = reader.getMetadataStore()
    omeOut = setUpXml(omeOut, image, files)
    reader.close()
    image.close()
    itime = 0

    for fileName in files:
        omeMeta = MetadataTools.createOMEXMLMetadata()
        reader.setMetadataStore(omeMeta)
        reader.setId(fileName)
        timeInfo.append(getTimePoint(reader, omeMeta))

        nrImages = reader.getImageCount()
        for i in range(0, reader.getImageCount()):
            try:
                dT = round(timeInfo[files.index(fileName)]-timeInfo[0],2)
            except:
                dT = (timeInfo[files.index(fileName)]-timeInfo[0]).seconds
            omeOut.setPlaneDeltaT(dT, 0, i + itime*nrImages)
            omeOut.setPlanePositionX(omeOut.getPlanePositionX(0,i), 0, i + itime*nrImages)
            omeOut.setPlanePositionY(omeOut.getPlanePositionY(0,i), 0, i + itime*nrImages)
            omeOut.setPlanePositionZ(omeOut.getPlanePositionZ(0,i), 0, i + itime*nrImages)
            omeOut.setPlaneTheC(omeOut.getPlaneTheC(0,i), 0, i + itime*nrImages)
            omeOut.setPlaneTheT(omeOut.getPlaneTheT(0,i), 0, i + itime*nrImages)
            omeOut.setPlaneTheZ(omeOut.getPlaneTheZ(0,i), 0, i + itime*nrImages)
        itime = itime + 1
        reader.close()

        IJ.showProgress(files.index(fileName), len(files))

    try:
        omeOut.setPixelsTimeIncrement(float(dT/(len(files)-1)), 0)
    except:
        omeOut.setPixelsTimeIncrement(0, 0)

    if len(files) <= 1:
        raise RuntimeError('Found only one file. Nothing to concatenate')

    outfile = concatenateImagePlus(files, outfile)
    filein = RandomAccessInputStream(outfile)
    fileout = RandomAccessOutputStream(outfile)
    saver = TiffSaver(fileout, outfile)
    saver.overwriteComment(filein, omeOut.dumpXML())
    fileout.close()
    filein.close()
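
# processMovie() relies on helpers (setUpXml, getTimePoint, concatenateImagePlus) that
# are defined elsewhere in the original script. As an illustration only, getTimePoint()
# might read the acquisition date from the OME metadata and convert it to seconds,
# roughly as sketched below; the real helper may work differently (e.g. return
# datetime objects, which the .seconds fallback above hints at).
import time

def getTimePoint(reader, omeMeta):
    # the acquisition date comes back as an OME Timestamp, e.g. 2016-08-17T16:36:26
    timestamp = omeMeta.getImageAcquisitionDate(0)
    if timestamp is None:
        return 0
    isodate = timestamp.toString().split('.')[0]
    return time.mktime(time.strptime(isodate, '%Y-%m-%dT%H:%M:%S'))
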
def processDirectory():
	# start logging
	IJ.log("\n______________________________\n\n\t\tOlympus DM correction\n\t\tVersion " + pluginVersion +"\n______________________________\n")

	# ask user for an input directory
	dc = DirectoryChooser("Choose folder containing Olympus (.oir) files")  
	inputDir = dc.getDirectory() 
	if inputDir is None:  
		IJ.log("User canceled the dialog!\nImage processing canceled!\n")  
		return  
	IJ.log("\nInput directory: " + inputDir + "\n")

	oirFiles = []
	for f in os.listdir(inputDir):
		if f.endswith(".oir"):
			oirFiles.append(f)

	if len(oirFiles) < 1:
		IJ.log("Input directory does not contain any Olympus (.oir) files.\nNo images to process.\n")
		return
  	
	# find out how many channels are in first file (we will assume all files have same number of channels and were acquired using same DMs)
	reader = ImageReader()
	omeMeta = MetadataTools.createOMEXMLMetadata()
	reader.setMetadataStore(omeMeta)
	reader.setId(inputDir + oirFiles[0])
	numChannels = reader.getSizeC()
	reader.close()

	# ask user to identify dichroic mirror used for each channel  
	gdDM = GenericDialog("Dichroic mirrors")
	DMs = ["DM1", "DM2", "DM3", "DM4", "DM5"] 
	for i in range(numChannels):
		gdDM.addChoice("Channel " + str(i+1), DMs, DMs[0])
	gdDM.addCheckbox("Merge channels", False) 
	gdDM.showDialog()
	if gdDM.wasCanceled():
		IJ.log("User canceled the dialog!\nImage processing canceled!\n")
		return
	dichroics = []
	for i in range(numChannels):
		dichroics.append(gdDM.getNextChoice())
	merge = gdDM.getNextBoolean()
	IJ.log("User selected dichroic mirrors")
	for i in range(numChannels):
		IJ.log("\t\tChannel " + str(i+1) + ": " + dichroics[i])
	IJ.log("\n")

	if merge:
		channels = []
		chDict = {}
		for i in range(numChannels):
			chName = "Channel"+str(i+1)
			channels.append(chName)
			chDict[chName] = i
		channels.append("NONE")
		colourChoices = ["red", "green", "blue", "gray", "cyan", "magenta", "yellow"]
		gdMerge = GenericDialog("Merge channels")
		for c in colourChoices:
			gdMerge.addChoice(c + ":", channels, channels[numChannels])
		gdMerge.showDialog()
		if gdMerge.wasCanceled():
			IJ.log("User canceled the dialog!\nImage processing canceled!\n")
			return
		IJ.log("User selected channel colours")
		usersMergeList = []
		for i in range(len(colourChoices)):
			ch = gdMerge.getNextChoice()
			if ch == "NONE":
				usersMergeList.append(None)
			else:
				usersMergeList.append(chDict[ch])
				IJ.log("\t\t" + colourChoices[i] + ": " + ch)
		IJ.log("\n\n")

	# ask user for an output directory
	dc = DirectoryChooser("Choose folder for output")  
	outputDir = dc.getDirectory()    
	if outputDir is None:  
		IJ.log("User canceled the dialog!\nImage processing canceled!\n")  
		return  

	counter = 0
	totalFiles = len(oirFiles)
	for o in oirFiles:
		counter +=1
		IJ.log("Processing file " + str(counter) + " of " + str(totalFiles) + "\n")
		IJ.log("File path: " + inputDir + o)
		if merge:
			ml = usersMergeList[:]
		else:
			ml = None
		processFile(o, inputDir, outputDir, dichroics, ml)
		IJ.log("\n--------------------------\n")
Example no. 27
width = r.getSizeX()
height = r.getSizeY()
md = r.getGlobalMetadata()
# print(type(md))
# print(num, width, height)
stack = ImageStack(width, height)
i = 0
ip = r.openProcessors(i)[0]
stack.addSlice("1", ip);
imp = ImagePlus("foo", stack);
r.close()
imp.show()
IJ.run("Enhance Contrast", "saturated=0.35")

imageReader = ImageReader()
meta = MetadataTools.createOMEXMLMetadata()
imageReader.setMetadataStore(meta)
imageReader.setId(filePath)
pSizeX = meta.getPixelsPhysicalSizeX(0)
pSizeY = meta.getPixelsPhysicalSizeY(0)
imageReader.close()
print(pSizeX, pSizeY)
print(meta.getPixelsSizeX(0))
print(meta.getPixelsSizeY(0))
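
# Having read the physical pixel size, one would typically also apply it to the
# displayed ImagePlus so that measurements use calibrated units. A minimal sketch,
# assuming pSizeX/pSizeY are the Length objects read above and imp is the ImagePlus
# created earlier in this snippet:
from ij.measure import Calibration

if pSizeX is not None and pSizeY is not None:
    cal = Calibration()
    cal.pixelWidth = pSizeX.value()
    cal.pixelHeight = pSizeY.value()
    cal.setUnit(pSizeX.unit().getSymbol())
    imp.setCalibration(cal)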






Example no. 28
def write_fused(output_path,channel,sizeZ,theC,physX,physY,physZ):

	IJ.log("Writing fused data")

	# number of slices will determine filename format
	digits = "00"
	if sizeZ < 100:
		digits = "0"
	if sizeZ < 10:
		digits = ""

	# get the base metadata from the first fused image
	meta = MetadataTools.createOMEXMLMetadata()
	reader = get_reader(output_path+"img_t1_z%s1_c1"%digits,meta)
	reader.close()
	
	# reset some metadata
	meta.setPixelsPhysicalSizeX(physX,0)
	meta.setPixelsPhysicalSizeY(physY,0)
	meta.setPixelsPhysicalSizeZ(physZ,0)
	meta.setPixelsSizeZ(PositiveInteger(sizeZ),0)
	meta.setChannelID("Channel:0:" + str(0), 0, 0)
	spp = channel['spp']
	meta.setChannelSamplesPerPixel(spp, 0, 0)
	name = channel['name']
	color = channel['color']
	meta.setChannelName(name,0,0)
	meta.setChannelColor(color,0,0)
		
	# determine the number of subsets that need to be written
	slices_per_subset = 200
	num_output_files = divmod(sizeZ,slices_per_subset)
	fpaths = []
	if num_output_files[0] == 0:
		nslices = [sizeZ]
		num_output_files = 1
		fpaths.append("%sfused_C%s.ome.tif"%(output_path,str(theC-1)))
	else:
		nslices = []
		for n in range(num_output_files[0]):
			nslices.append(slices_per_subset)

		if num_output_files[1] > 0:
			nslices.append(num_output_files[1])		
		
		for s in range(len(nslices)):
			fpaths.append("%sfused_C%s_subset%s.ome.tif"%(output_path,str(theC-1),str(s)))

	# setup a writer
	writer = ImageWriter()
	writer.setCompression('LZW')
	writer.setMetadataRetrieve(meta)
	writer.setId(fpaths[0])

	# write the slices, changing the output file when necessary
	theZ = 0
	for f in range(len(fpaths)):
		meta.setImageName(os.path.basename(fpaths[f]),0)
		writer.changeOutputFile(fpaths[f])
		for s in range(nslices[f]):
			fpath = output_path+"img_t1_z%s%s_c1"%(digits,str(theZ+1))
			if (len(digits) == 1) and (theZ+1 > 9):
				fpath = output_path+"img_t1_z%s_c1"%(str(theZ+1))
			if (len(digits) == 2) and (theZ+1 > 9):
				fpath = output_path+"img_t1_z0%s_c1"%(str(theZ+1))
			if (len(digits) == 2) and (theZ+1 > 99):
				fpath = output_path+"img_t1_z%s_c1"%(str(theZ+1))
			IJ.log("writing slice %s"%os.path.basename(fpath))
			m = MetadataTools.createOMEXMLMetadata()
			
			r = get_reader(fpath,m)
			m.setPixelsPhysicalSizeX(physX,0)
			m.setPixelsPhysicalSizeY(physY,0)
			m.setPixelsPhysicalSizeZ(physZ,0)
			m.setChannelID("Channel:0:" + str(0), 0, 0)
			spp = channel['spp']
			m.setChannelSamplesPerPixel(spp, 0, 0)
			name = channel['name']
			color = channel['color']
			m.setChannelName(name,0,0)
			m.setChannelColor(color,0,0)
			writer.saveBytes(theZ,r.openBytes(0))
			r.close()
			theZ += 1
	writer.close()
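
# A minimal sketch of how write_fused() might be called; the paths, channel properties
# and pixel sizes below are hypothetical examples, and depending on the Bio-Formats
# version the micrometre unit constant may be UNITS.MICROMETER or the older UNITS.MICROM.
from ome.units import UNITS
from ome.units.quantity import Length
from ome.xml.model.primitives import PositiveInteger, Color

channel = {'spp': PositiveInteger(1), 'name': 'GFP', 'color': Color(0, 255, 0, 255)}
physX = Length(0.1625, UNITS.MICROMETER)
physY = Length(0.1625, UNITS.MICROMETER)
physZ = Length(0.5, UNITS.MICROMETER)
write_fused("/path/to/fused/output/", channel, 120, 1, physX, physY, physZ)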
Example no. 29
import os
import xml.etree.ElementTree as etree

import ij
from loci.formats import ImageReader
from loci.formats import MetadataTools


def get_reader(file, complete_meta):
    reader = ImageReader()
    reader.setMetadataStore(complete_meta)
    reader.setId(file)
    return reader


# files is a comma separated list of paths to the first ztc
basepath = "/Users/uqdmatt2/Desktop/"
files = [basepath + "Original_File/example stitch_Z0_T0_C0.tiff"]

for fpath in files:
    original_metadata = MetadataTools.createOMEXMLMetadata()
    reader = get_reader(fpath, original_metadata)
    reader.close()
    xml_data = original_metadata.dumpXML()

    outputdir = os.path.dirname(fpath)
    shortname = os.path.basename(fpath)[:-5]
    outputpath = os.path.join(outputdir, shortname + ".xml")
    root = etree.fromstring(xml_data.decode('utf-8', 'ignore'))
    et = etree.ElementTree(root)
    et.write(outputpath)
Example no. 30
def link_slices(output_path,channel,sizeZ,theC,physX,physY,physZ):

	IJ.log("Linking z slices")

	# number of slices will determine filename format
	digits = "00"
	if sizeZ < 100:
		digits = "0"
	if sizeZ < 10:
		digits = ""

	# get the base metadata from the first fused image
	z0meta = MetadataTools.createOMEXMLMetadata()
	fused = glob.glob(output_path + "fused*")
	first_fused = fused[0]
	reader = get_reader(first_fused,z0meta)
	z0meta.setPixelsSizeZ(PositiveInteger(sizeZ),0)
	reader.close()

	for z in range(sizeZ):
		fpath = fused[z]
		IJ.log("writing metadata to slice %s"%os.path.basename(fpath))
		m = MetadataTools.createOMEXMLMetadata()
		r = get_reader(fpath,m)

		# set the TiffData elements for this z-slice on the first image
		# setTiffDataIFD(IFD, image index, TiffData index)
		z0meta.setTiffDataIFD(NNI(0),0,z)
		# setTiffDataPlaneCount(planecount, image index, TiffData index)
		z0meta.setTiffDataPlaneCount(NNI(1),0,z)
		# setTiffDataFirstC(firstC, image index, TiffData index)
		z0meta.setTiffDataFirstC(NNI(0),0,z)
		# setTiffDataFirstT(firstT, image index, TiffData index)
		z0meta.setTiffDataFirstT(NNI(0),0,z)
		# setTiffDataFirstZ(firstZ, image index, TiffData index)
		z0meta.setTiffDataFirstZ(NNI(z),0,z)
		# setUUIDFileName(filename, image index, TiffData index)
		z0meta.setUUIDFileName(m.getUUIDFileName(0,0),0,z)
		# setUUIDValue(value, image index, TiffData index)
		z0meta.setUUIDValue(m.getUUIDValue(0,0),0,z)

		# set the physical pixel sizes on each plane
		m.setPixelsPhysicalSizeX(physX,0)
		m.setPixelsPhysicalSizeY(physY,0)
		m.setPixelsPhysicalSizeZ(physZ,0)

		# set the channel attributes on each plane
		m.setChannelID("Channel:0:" + str(0), 0, 0)
		spp = channel['spp']
		m.setChannelSamplesPerPixel(spp, 0, 0)
		name = channel['name']
		color = channel['color']
		m.setChannelName(name,0,0)
		m.setChannelColor(color,0,0)
		r.close()

		# replace the metadata in the slice
		if z > 0:
			replace_meta(m,fpath)

	replace_meta(z0meta,first_fused)
Example no. 31
import os
import xml.etree.ElementTree as etree

import ij
from loci.formats import ImageReader
from loci.formats import MetadataTools

def get_reader(file, complete_meta):
	reader = ImageReader()
	reader.setMetadataStore(complete_meta)
	reader.setId(file)
	return reader

# files is a comma separated list of paths to the first ztc
basepath = "/Users/uqdmatt2/Desktop/"	
files = [basepath+"Original_File/example stitch_Z0_T0_C0.tiff"]

for fpath in files:
	original_metadata = MetadataTools.createOMEXMLMetadata()
	reader = get_reader(fpath,original_metadata)
	reader.close()
	xml_data = original_metadata.dumpXML()

	outputdir = os.path.dirname(fpath)
	shortname = os.path.basename(fpath)[:-5]	
	outputpath = os.path.join(outputdir,shortname+".xml")
	root = etree.fromstring(xml_data.decode('utf-8','ignore'))
	et = etree.ElementTree(root)
	et.write(outputpath)

	def readZeissHeader(self, infoStr):		
		# This is incredibly difficult to get working as (date, time, voxels) are in different obscure places in lsm and czi
		# Furthermore, just trying to read the raw ome xml is futile
		#
		# parsing ome xml as a string and searching it with regular expression(re) does not work
		# it is beyond the scope of my work to figure this out
		# the fact that it does not work and there is little documentation is a pretty big waste of time
		#
		# get and parse xml to find date/time
		#fi = self.imp.getOriginalFileInfo(); # returns a FileInfo object
		#omexml = fi.description #omexml is a string
		#omexml = omexml.encode('utf-8')
		#omexml = omexml.replaceAll("[^\\x20-\\x7e]", "") # see: https://stackoverflow.com/questions/2599919/java-parsing-xml-document-gives-content-not-allowed-in-prolog-error

		# (1) try and search the ome xml like a string, this gives errors
		#docsPattern = '<AcquisitionDate>.*</AcquisitionDate>'
		#searchresult = re.search(docsPattern, omexml)
		#print 'searchresult:', searchresult.group(0)
		
		# 2) treat the ome xml like any other xml (because it's xml, right?)
		# well this raises errors too
		#omexml has <AcquisitionDate>2016-08-17T15:21:50</AcquisitionDate>
		#import xml.etree.ElementTree
		#e = xml.etree.ElementTree.fromstring(omexml).getroot()		#print omexml
		#for atype in e.findall('AcquisitionDate'):
		#    print 'AcquisitionDate:', atype #.get('foobar')
		#
		#

		if self.islsm:
			# lsm have date hidden in omeMeta.getImageAcquisitionDate(0)
			# this is copied from code at: https://gist.github.com/ctrueden/6282856
			reader = ImageReader()
			omeMeta = MetadataTools.createOMEXMLMetadata() #omeMeta.getImageAcquisitionDate(0)
			reader.setMetadataStore(omeMeta)
			reader.setId(self.filepath)
			#seriesCount = reader.getSeriesCount()
			dateTimeStr = omeMeta.getImageAcquisitionDate(0) #2016-08-17T16:36:26
			reader.close()
			if dateTimeStr:
				self.dateStr, self.timeStr = dateTimeStr.toString().split('T')
				self.dateStr = bFixDate(self.dateStr)
				self.timeStr = bFixTime(self.timeStr)
				#bPrintLog('LSM date/time is: ' + self.dateStr + ' ' + self.timeStr, 3)
			else:
				bPrintLog('WARNING: did not get Zeiss date/time string')

			# lsm have voxels in infoStr
			for line in infoStr.split('\n'):
				#print line
				if line.find('VoxelSizeX') != -1:
					self.voxelx = float(line.split('=')[1])
				if line.find('VoxelSizeY') != -1:
					self.voxely = float(line.split('=')[1])
				if line.find('VoxelSizeZ') != -1:
					self.voxelz = float(line.split('=')[1])
				if line.find('SizeC') != -1:
					self.numChannels = int(line.split('=')[1])
				#if line.find('BitsPerPixel') and not line.startswith('Experiment') != -1: # 20170811, startswith is for czi
				#	self.bitsPerPixel = int(line.split('=')[1])
				if line.find('RecordingZoomX#1') != -1:
					self.zoom = int(line.split('=')[1])

		if self.isczi:
			# czi has date/time in infoStr (lsm does not)
			for line in infoStr.split('\n'):
				if line.find('CreationDate #1') != -1: # w.t.f. is #1 referring to?
					lhs, rhs = line.split('=')
					rhs = rhs.replace('  ', ' ')
					if rhs.startswith(' '):
						rhs = rhs[1:-1]
					#print "lhs: '" + lhs + "'" + "rhs: '" + rhs + "'"
					if rhs.find('T') != -1:
						self.dateStr, self.timeStr = rhs.split('T')
					else:
						self.dateStr, self.timeStr = rhs.split(' ')
					self.dateStr = bFixDate(self.dateStr)
					self.timeStr = bFixTime(self.timeStr)
					#bPrintLog('CZI date/time is: ' + self.dateStr + ' ' + self.timeStr, 3)
				# .czi
				# <Pixels BigEndian="false" DimensionOrder="XYCZT" ID="Pixels:0" Interleaved="false" PhysicalSizeX="0.20756645602494875" PhysicalSizeXUnit="µm" PhysicalSizeY="0.20756645602494875" PhysicalSizeYUnit="µm" PhysicalSizeZ="0.75" PhysicalSizeZUnit="µm" SignificantBits="8" SizeC="1" SizeT="1" SizeX="1024" SizeY="1024" SizeZ="50" Type="uint8">

			# czi have voxel in calibration
			self.voxelx = self.imp.getCalibration().pixelWidth; 
			self.voxely = self.imp.getCalibration().pixelHeight; 
			self.voxelz = self.imp.getCalibration().pixelDepth; 
			#bPrintLog('readZeissHeader() read czi scale as: ' + str(self.voxelx) + ' ' + str(self.voxely) + ' ' + str(self.voxelz), 3)

			# CLEARING self.infoStr for CZI ... it was WAY too big to parse in Map Manager
			self.infoStr = ''
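
# One common reason the ElementTree attempt described in the comments above comes up
# empty is that OME-XML elements are namespaced, so findall('AcquisitionDate') matches
# nothing. A sketch of a namespace-agnostic search, assuming omexml is an OME-XML
# string (e.g. from the FileInfo description or MetadataTools dumpXML()):
import xml.etree.ElementTree as ET

def find_acquisition_dates(omexml):
	# collect the text of every AcquisitionDate element, ignoring its namespace prefix,
	# e.g. '{http://www.openmicroscopy.org/Schemas/OME/2016-06}AcquisitionDate'
	dates = []
	root = ET.fromstring(omexml)
	for elem in root.iter():
		if elem.tag.split('}')[-1] == 'AcquisitionDate':
			dates.append(elem.text)
	return dates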
Example no. 33
def process_time_points(root, files, outdir):
	'''Concatenate images and write ome.tiff file. If the image already contains multiple time points, just copy the image'''
	concat = 1
	files.sort()
	options = ImporterOptions()
	options.setId(files[0])
	options.setVirtual(1)
	image = BF.openImagePlus(options)
	image = image[0]
	if image.getNFrames() > 1:
		IJ.log(files[0] + " Contains multiple time points. Can only concatenate single time points! Don't do anything!")
		image.close()
		return
	
	width  = image.getWidth()
	height = image.getHeight()
	for patt in pattern:
		outName = re.match(patt, os.path.basename(files[0]))
		if outName is None:
			continue
		if outdir is None:
			outfile = os.path.join(root, outName.group(1) + '.ome.tif')
		else:
			outfile =  os.path.join(outdir, outName.group(1) + '.ome.tif')
		reader = ImageReader()
		reader.setMetadataStore(MetadataTools.createOMEXMLMetadata())
		reader.setId(files[0])
		timeInfo = []
		omeOut = reader.getMetadataStore()
		omeOut = setUpXml(omeOut, image, files)
		reader.close()
		image.close()
		IJ.log ('Concatenates ' + os.path.join(root, outName.group(1) + '.ome.tif'))
		itime = 0
		try:
			for ifile, fileName in enumerate(files):
				print fileName
				omeMeta = MetadataTools.createOMEXMLMetadata()
	
				reader.setMetadataStore(omeMeta)
				reader.setId(fileName)
				#print omeMeta.getPlaneDeltaT(0,0)
				#print omeMeta.getPixelsTimeIncrement(0)
				
				if fileName.endswith('.czi'):
					if ifile == 0:
						T0 = omeMeta.getPlaneDeltaT(0,0).value()
					dT = omeMeta.getPlaneDeltaT(0,0).value() - T0
					unit =  omeMeta.getPlaneDeltaT(0,0).unit()
				else:
					timeInfo.append(getTimePoint(reader, omeMeta))
					unit = omeMeta.getPixelsTimeIncrement(0).unit()
					try:
						dT = round(timeInfo[files.index(fileName)]-timeInfo[0],2)
					except:
						dT = (timeInfo[files.index(fileName)]-timeInfo[0]).seconds
				
				nrImages = reader.getImageCount()
	
	
				for i in range(0, reader.getImageCount()):
	
					try:
						omeOut.setPlaneDeltaT(dT, 0, i + itime*nrImages)
					except TypeError:
						omeOut.setPlaneDeltaT(Time(dT, unit),0, i + itime*nrImages)
					omeOut.setPlanePositionX(omeOut.getPlanePositionX(0,i), 0, i + itime*nrImages)
					omeOut.setPlanePositionY(omeOut.getPlanePositionY(0,i), 0, i + itime*nrImages)
					omeOut.setPlanePositionZ(omeOut.getPlanePositionZ(0,i), 0, i + itime*nrImages)
					omeOut.setPlaneTheC(omeOut.getPlaneTheC(0,i), 0, i + itime*nrImages)
					omeOut.setPlaneTheT(NonNegativeInteger(itime), 0, i + itime*nrImages)
					omeOut.setPlaneTheZ(omeOut.getPlaneTheZ(0,i), 0, i + itime*nrImages)
				itime = itime + 1
				reader.close()
	
				IJ.showProgress(files.index(fileName), len(files))
			try:
				incr = float(dT/(len(files)-1))
			except:
				incr = 0
			
			try:
				omeOut.setPixelsTimeIncrement(incr, 0)
			except TypeError:
				#new Bioformats >5.1.x
				omeOut.setPixelsTimeIncrement(Time(incr, unit),0)
			
			outfile = concatenateImagePlus(files, outfile)
			if outfile is not None:
				filein = RandomAccessInputStream(outfile)
				fileout = RandomAccessOutputStream(outfile)
				saver = TiffSaver(fileout, outfile)
				saver.overwriteComment(filein,omeOut.dumpXML())
				fileout.close()
				filein.close()
	
	
		except:
			traceback.print_exc()
		finally:
			#close all possible open files
			try:
				reader.close()
			except:
				pass
			try:
				filein.close()
			except:
				pass
			try:
				fileout.close()
			except: