def processFile(filename, inDir, outDir, dichroics, mergeList):

	merge = mergeList is not None
	
	filenameExExt = os.path.splitext(filename)[0]
	filepath = inDir + filename
      
	# parse metadata
	reader = ImageReader()
	omeMeta = MetadataTools.createOMEXMLMetadata()
	reader.setMetadataStore(omeMeta)
	reader.setId(filepath)
	numChannels = reader.getSizeC()
	numSlices = reader.getSizeZ()
	numFrames = reader.getSizeT()
	seriesCount = reader.getSeriesCount()

	globalMetadata = reader.getGlobalMetadata()
	seriesMetadata = reader.getSeriesMetadata()

	objLensName = globalMetadata['- Objective Lens name #1']

	areaRotation = float(seriesMetadata['area rotation #1'])
	acquisitionValueRotation = float(seriesMetadata['acquisitionValue rotation #1'])
	if 'regionInfo rotation #1' in seriesMetadata:
		regionInfoRotation = float(seriesMetadata['regionInfo rotation #1'])
	else:
		regionInfoRotation = float(0)

	totalRotation = areaRotation + regionInfoRotation
	physSizeX = omeMeta.getPixelsPhysicalSizeX(0)
	physSizeY = omeMeta.getPixelsPhysicalSizeY(0)
	pxSizeX = physSizeX.value(UNITS.MICROM)
	pxSizeY = physSizeY.value(UNITS.MICROM)

	# log metadata
	IJ.log("\nMETADATA")
	#IJ.log("Filename: " + filepath)
	IJ.log("Number of series: " + str(seriesCount))
	IJ.log("Number of channels: " + str(numChannels))
	IJ.log("Number of frames: " + str(numFrames))
	IJ.log("Number of slices: " + str(numSlices))
	IJ.log("Objective lens: " + objLensName)
	IJ.log("FOV rotation: " + str(areaRotation))
	IJ.log("ROI rotation: " + str(regionInfoRotation))
	IJ.log("Total rotation: " + str(totalRotation))
	IJ.log("Pixel size:")
	IJ.log("\t\tX = " + str(physSizeX.value()) + " " + physSizeX.unit().getSymbol())
	IJ.log("\t\tY = " + str(physSizeY.value()) + " " + physSizeY.unit().getSymbol())
  
	if merge:
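		# hidden, timestamp-named temporary folder for the per-channel tifs; it is removed again after merging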
		tifDir = outDir + "." + str(datetime.now()).replace(" ", "").replace(":", "") + "/"
		if not os.path.exists(tifDir):
			os.makedirs(tifDir)
			IJ.log("\nCreated temporary folder: " + tifDir + "\n")
		else:
			IJ.log("Unable to create temporary folder!\n")
	else:
		tifDir = outDir + filenameExExt + "/"
		if not os.path.exists(tifDir):
			os.makedirs(tifDir)
			IJ.log("\nCreated subfolder: " + tifDir + "\n")
		else:
			IJ.log("\nSubfolder " + tifDir +  " already exists.\n")

	# correct images
	tifFilePaths = []
	for i in range(numChannels):
		ip = extractChannel(oirFile=filepath, ch=i)
		if dichroics[i] == "DM1":
			IJ.log("Channel " + str(i+1) + " was imaged using DM1, so no correction required.")
		else:
			offsets = getOffset(obj=objLensName,dm=dichroicDict[dichroics[i]])
			xom = offsets['x']
			yom = offsets['y']
			if abs(totalRotation) > 0.1:
				rotOff = rotateOffset(x=xom, y=yom, angle=-totalRotation)
				xom = rotOff['x']
				yom = rotOff['y']
			xop = int(round(xom/pxSizeX))
			yop = int(round(yom/pxSizeY))
			IJ.log("Channel " + str(i+1) + " offsets")
			IJ.log("\t\tMicrometres")
			IJ.log("\t\t\t\tx = " + str(xom))
			IJ.log("\t\t\t\ty = " + str(yom))
			IJ.log("\t\tPixels")
			IJ.log("\t\t\t\tx = " + str(xop))
			IJ.log("\t\t\t\ty = " + str(yop))
			IJ.run(ip, "Translate...", "x=" + str(-xop) + " y=" + str(-yop) + " interpolation=None stack")

		tifFilePath = tifDir + filenameExExt + "_ch_"+str(i+1)+".tif"
		tifFilePaths.append(tifFilePath)
		if os.path.exists(tifFilePath):
			IJ.log("\nOutput file exists: " + tifFilePath)
			IJ.log("Rerun plugin choosing a different output folder")
			IJ.log("or delete file and then rerun plugin.")
			IJ.log("Image processing terminated!\n")
			return
		FileSaver(ip).saveAsTiff(tifFilePath)

	if merge:
		max_list = []
		for i in range(len(mergeList)):
			if mergeList[i] is not None:
				mergeList[i] = readSingleChannelImg(tifFilePaths[mergeList[i]])
				channel = mergeList[i]  # see https://python.hotexamples.com/examples/ij.plugin/RGBStackMerge/mergeChannels/python-rgbstackmerge-mergechannels-method-examples.html
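				# maximum-intensity projection of this channel, used for the separate *_max.tif output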
				projector = ZProjector(channel)
				projector.setMethod(ZProjector.MAX_METHOD)
				projector.doProjection()
				max_list.append(projector.getProjection())
		merged = RGBStackMerge.mergeChannels(mergeList, False)
		merged_max = RGBStackMerge.mergeChannels(max_list, False)
		mergedChannelFilepath = outDir + filenameExExt + ".tif"
		maxMergedChannelFilepath = outDir + filenameExExt + "_max.tif"
		if os.path.exists(mergedChannelFilepath):
			IJ.log("\nOutput file exists: " + mergedChannelFilepath)
			IJ.log("Rerun plugin choosing a different output folder")
			IJ.log("or delete file and then rerun plugin.")
			IJ.log("Image processing terminated!\n")
		FileSaver(merged).saveAsTiff(mergedChannelFilepath)
		FileSaver(merged_max).saveAsTiff(maxMergedChannelFilepath)
		for tf in tifFilePaths:
			os.remove(tf)
		os.rmdir(tifDir)	

	IJ.log("\nFinished processing file:\n" + filepath + "\n")
	if merge:
		IJ.log("Image file with channels aligned:\n" + outDir + filenameExExt + ".tif\n")
	else:
		IJ.log("Aligned images (one tiff file for each channel) can be found in:\n" + tifDir + "\n")
def get_ome_metadata(source, imagenames):
    """Get the stage coordinates and calibration from the ome-xml for a given list of images

    Arguments:
        source {string} -- Path to the images
        imagenames {list} -- list of images filenames

    Returns:
        a tuple that contains:
        dimensions {int} -- number of dimensions (2D or 3D)
        stage_coordinates_x {list} -- the absolute stage x-coordinates from ome-xml metadata
        stage_coordinates_y {list} -- the absolute stage y-coordinates from ome-xml metadata
        stage_coordinates_z {list} -- the absolute stage z-coordinates from ome-xml metadata
        relative_coordinates_x_px {list} -- the relative stage x-coordinates in px
        relative_coordinates_y_px {list} -- the relative stage y-coordinates in px
        relative_coordinates_z_px {list} -- the relative stage z-coordinates in px
        image_calibration {list} -- x,y,z image calibration in unit/px
        calibration_unit {string} -- image calibration unit
        image_dimensions_czt {list} -- number of images in dimensions c,z,t
    """

    # lists to store the absolute stage coordinates from the metadata
    stage_coordinates_x = []
    stage_coordinates_y = []
    stage_coordinates_z = []

    for counter, image in enumerate(imagenames):

        # parse metadata
        reader = ImageReader()
        omeMeta = MetadataTools.createOMEXMLMetadata()
        reader.setMetadataStore(omeMeta)
        reader.setId(source + str(image))

        # get hyperstack dimensions from the first image
        if counter == 0:
            frame_size_x = reader.getSizeX()
            frame_size_y = reader.getSizeY()
            frame_size_z = reader.getSizeZ()
            frame_size_c = reader.getSizeC()
            frame_size_t = reader.getSizeT()

            # note the dimensions
            if frame_size_z == 1:
                dimensions = 2
            if frame_size_z > 1:
                dimensions = 3

            # get the physical calibration for the first image series
            physSizeX = omeMeta.getPixelsPhysicalSizeX(0)
            physSizeY = omeMeta.getPixelsPhysicalSizeY(0)
            physSizeZ = omeMeta.getPixelsPhysicalSizeZ(0)

            # workaround to get the z-interval if physSizeZ is missing (None) in the metadata
            z_interval = 1
            if physSizeZ is not None:
                z_interval = physSizeZ.value()

            if frame_size_z > 1 and physSizeZ is None:
                print "no z calibration found, trying to recover"
                first_plane = omeMeta.getPlanePositionZ(0, 0)
                next_plane_imagenumber = frame_size_c + frame_size_t - 1
                second_plane = omeMeta.getPlanePositionZ(
                    0, next_plane_imagenumber)
                z_interval = abs(
                    abs(first_plane.value()) - abs(second_plane.value()))
                print "z-interval seems to be: ", z_interval

            # create an image calibration
            image_calibration = [
                physSizeX.value(),
                physSizeY.value(), z_interval
            ]
            calibration_unit = physSizeX.unit().getSymbol()
            image_dimensions_czt = [frame_size_c, frame_size_z, frame_size_t]

        reader.close()

        # get the plane position in calibrated units
        current_position_x = omeMeta.getPlanePositionX(0, 0)
        current_position_y = omeMeta.getPlanePositionY(0, 0)
        current_position_z = omeMeta.getPlanePositionZ(0, 0)

        # get the absolute stage positions and store them
        pos_x = current_position_x.value()
        pos_y = current_position_y.value()

        if current_position_z is None:
            print "the z-position is missing in the ome-xml metadata."
            pos_z = 1.0
        else:
            pos_z = current_position_z.value()

        stage_coordinates_x.append(pos_x)
        stage_coordinates_y.append(pos_y)
        stage_coordinates_z.append(pos_z)

    # calculate and store the relative stage movements in px (for the grid/collection stitcher)
    relative_coordinates_x_px = []
    relative_coordinates_y_px = []
    relative_coordinates_z_px = []

    for i in range(len(stage_coordinates_x)):
        rel_pos_x = (stage_coordinates_x[i] -
                     stage_coordinates_x[0]) / physSizeX.value()
        rel_pos_y = (stage_coordinates_y[i] -
                     stage_coordinates_y[0]) / physSizeY.value()
        rel_pos_z = (stage_coordinates_z[i] -
                     stage_coordinates_z[0]) / z_interval

        relative_coordinates_x_px.append(rel_pos_x)
        relative_coordinates_y_px.append(rel_pos_y)
        relative_coordinates_z_px.append(rel_pos_z)

    return (dimensions, stage_coordinates_x, stage_coordinates_y,
            stage_coordinates_z, relative_coordinates_x_px,
            relative_coordinates_y_px, relative_coordinates_z_px,
            image_calibration, calibration_unit, image_dimensions_czt)
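

# A minimal usage sketch for get_ome_metadata(); the folder and file names below are
# hypothetical placeholders, and the relative pixel coordinates are the values one would
# typically pass on to the Grid/Collection stitcher.
def example_get_ome_metadata_usage():
    source = "/path/to/tiles/"  # hypothetical input folder
    imagenames = ["tile_01.ome.tif", "tile_02.ome.tif"]  # hypothetical file names
    (dimensions, stage_x, stage_y, stage_z,
     rel_x_px, rel_y_px, rel_z_px,
     calibration, unit, dims_czt) = get_ome_metadata(source, imagenames)
    print "dimensionality:", dimensions
    print "calibration (x, y, z):", calibration, unit
    print "relative tile positions (px):", zip(rel_x_px, rel_y_px, rel_z_px)
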
def processFile():
	# start logging
	IJ.log("\n______________________________\n\n\t\tOlympus DM correction\n\t\tVersion " + pluginVersion +"\n______________________________\n")

	# ask user for file
	ofd = OpenDialog("Choose a file", None)  
	filename = ofd.getFileName()  
  
	if filename is None:
		IJ.log("User canceled the dialog!\nImage processing canceled!\n")
		return

	directory = ofd.getDirectory()
	filepath = directory + filename
	IJ.log("File path: " + filepath)

	if not filename.endswith(".oir"):
		IJ.log("Not an Olympus (.oir) file.\nNo image to process.\n")
		return

	filenameExExt = os.path.splitext(filename)[0]
      
	# parse metadata
	reader = ImageReader()
	omeMeta = MetadataTools.createOMEXMLMetadata()
	reader.setMetadataStore(omeMeta)
	reader.setId(filepath)
	numChannels = reader.getSizeC()
	numSlices = reader.getSizeZ()
	numFrames = reader.getSizeT()
	seriesCount = reader.getSeriesCount()

	globalMetadata = reader.getGlobalMetadata()
	seriesMetadata = reader.getSeriesMetadata()

	objLensName = globalMetadata['- Objective Lens name #1']

	areaRotation = float(seriesMetadata['area rotation #1'])
	acquisitionValueRotation = float(seriesMetadata['acquisitionValue rotation #1'])
	if 'regionInfo rotation #1' in seriesMetadata:
		regionInfoRotation = float(seriesMetadata['regionInfo rotation #1'])
	else:
		regionInfoRotation = float(0)

	totalRotation = areaRotation + regionInfoRotation
	physSizeX = omeMeta.getPixelsPhysicalSizeX(0)
	physSizeY = omeMeta.getPixelsPhysicalSizeY(0)
	pxSizeX = physSizeX.value(UNITS.MICROM)
	pxSizeY = physSizeY.value(UNITS.MICROM)

	# log metadata
	IJ.log("\nMETADATA")
	#IJ.log("Filename: " + filepath)
	IJ.log("Number of series: " + str(seriesCount))
	IJ.log("Number of channels: " + str(numChannels))
	IJ.log("Number of frames: " + str(numFrames))
	IJ.log("Number of slices: " + str(numSlices))
	IJ.log("Objective lens: " + objLensName)
	IJ.log("FOV rotation: " + str(areaRotation))
	IJ.log("ROI rotation: " + str(regionInfoRotation))
	IJ.log("Total rotation: " + str(totalRotation))
	IJ.log("Pixel size:")
	IJ.log("\t\tX = " + str(physSizeX.value()) + " " + physSizeX.unit().getSymbol())
	IJ.log("\t\tY = " + str(physSizeY.value()) + " " + physSizeY.unit().getSymbol())

	# ask user to identify dichroic mirror used for each channel  
	gdDM = GenericDialog("Dichroic mirrors")
	DMs = ["DM1", "DM2", "DM3", "DM4", "DM5"] 
	for i in range(numChannels):
		gdDM.addChoice("Channel " + str(i+1), DMs, DMs[0])
	gdDM.addCheckbox("Merge channels", False) 
	gdDM.showDialog()
	if gdDM.wasCanceled():
		IJ.log("User canceled the dialog!\nImage processing canceled!\n")
		return
	dichroics = []
	for i in range(numChannels):
		dichroics.append(gdDM.getNextChoice())
	merge = gdDM.getNextBoolean()
	IJ.log("\nUser selected dichroic mirrors")
	for i in range(numChannels):
		IJ.log("\t\tChannel " + str(i+1) + ": " + dichroics[i])	

	if merge:
		channels = []
		chDict = {}
		for i in range(numChannels):
			chName = "Channel"+str(i+1)
			channels.append(chName)
			chDict[chName] = i
		channels.append("NONE")
		colourChoices = ["red", "green", "blue", "gray", "cyan", "magenta", "yellow"]
		gdMerge = GenericDialog("Merge channels")
		for c in colourChoices:
			gdMerge.addChoice(c + ":", channels, channels[numChannels])
		gdMerge.showDialog()
		if gdMerge.wasCanceled():
			IJ.log("User canceled the dialog!\nImage processing canceled!\n")
			return
		IJ.log("\nUser selected channel colours")
		mergeList = []
		for i in range(len(colourChoices)):
			ch = gdMerge.getNextChoice()
			if ch == "NONE":
				mergeList.append(None)
			else:
				mergeList.append(chDict[ch])
				IJ.log("\t\t" + colourChoices[i] + ": " + ch)

	# ask user for an output directory
	dc = DirectoryChooser("Choose folder for output")  
	od = dc.getDirectory()    
	if od is None:  
		IJ.log("User canceled the dialog!\nImage processing canceled!\n")  
		return  
  
	if merge:
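		# hidden, timestamp-named temporary folder for the per-channel tifs; it is removed again after merging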
		tifDir = od + "." + str(datetime.now()).replace(" ", "").replace(":", "") + "/"
		if not os.path.exists(tifDir):
			os.makedirs(tifDir)
			IJ.log("\nCreated temporary folder: " + tifDir + "\n")
		else:
			IJ.log("Unable to create temporary folder!\n")
	else:
		tifDir = od + filenameExExt + "/"
		if not os.path.exists(tifDir):
			os.makedirs(tifDir)
			IJ.log("\nCreated subfolder: " + tifDir + "\n")
		else:
			IJ.log("\nSubfolder " + tifDir +  " already exists")

	# correct images
	tifFilePaths = []
	for i in range(numChannels):
		ip = extractChannel(oirFile=filepath, ch=i)
		if dichroics[i] == "DM1":
			IJ.log("Channel " + str(i+1) + " was imaged using DM1, so no correction required.")
		else:
			offsets = getOffset(obj=objLensName,dm=dichroicDict[dichroics[i]])
			xom = offsets['x']
			yom = offsets['y']
			if abs(totalRotation) > 0.1:
				rotOff = rotateOffset(x=xom, y=yom, angle=-totalRotation)
				xom = rotOff['x']
				yom = rotOff['y']
			xop = int(round(xom/pxSizeX))
			yop = int(round(yom/pxSizeY))
			IJ.log("Channel " + str(i+1) + " offsets")
			IJ.log("\t\tMicrometres")
			IJ.log("\t\t\t\tx = " + str(xom))
			IJ.log("\t\t\t\ty = " + str(yom))
			IJ.log("\t\tPixels")
			IJ.log("\t\t\t\tx = " + str(xop))
			IJ.log("\t\t\t\ty = " + str(yop))
			IJ.run(ip, "Translate...", "x=" + str(-xop) + " y=" + str(-yop) + " interpolation=None stack")

		tifFilePath = tifDir + filenameExExt + "_ch_"+str(i+1)+".tif"
		tifFilePaths.append(tifFilePath)
		if os.path.exists(tifFilePath):
			IJ.log("\nOutput file exists: " + tifFilePath)
			IJ.log("Rerun plugin choosing a different output folder")
			IJ.log("or delete file and then rerun plugin.")
			IJ.log("Image processing terminated!\n")
			return
		FileSaver(ip).saveAsTiff(tifFilePath)

	if merge:
		for i in range(len(mergeList)):
			if mergeList[i] is not None:
				mergeList[i] = readSingleChannelImg(tifFilePaths[mergeList[i]])
		merged = RGBStackMerge.mergeChannels(mergeList, False)
		mergedChannelFilepath = od + filenameExExt + ".tif"
		if os.path.exists(mergedChannelFilepath):
			IJ.log("\nOutput file exists: " + mergedChannelFilepath)
			IJ.log("Rerun plugin choosing a different output folder")
			IJ.log("or delete file and then rerun plugin.")
			IJ.log("Image processing terminated!\n")
		FileSaver(merged).saveAsTiff(mergedChannelFilepath)
		for tf in tifFilePaths:
			os.remove(tf)
		os.rmdir(tifDir)
			
	IJ.log("\nFinished processing file:\n" + filepath + "\n")
	if merge:
		IJ.log("Image file with channels aligned:\n" + od + filenameExExt + ".tif\n")
	else:
		IJ.log("Aligned images (one tiff file for each channel) can be found in:\n" + tifDir + "\n")
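# Note: run() below comes from a separate Fiji/Jython script and relies on script parameters
# and imports that are not shown here. A minimal sketch of what it assumes:
#
#   #@ File (style="directory") import_dir
#   #@ File (style="directory") result_dir
#   #@ String result_name
#   #@ DatasetIOService io
#   #@ StatusService status
#
#   import os
#   from glob import glob
#   from datetime import datetime
#   from java.lang import Boolean, Short, Long
#   from loci.common import DataTools
#   from loci.formats import ImageReader, ImageWriter, MetadataTools, FormatTools
#   from ome.xml.model.enums import DimensionOrder, PixelType
#   from ome.xml.model.primitives import PositiveInteger
#   from net.imagej.axis import Axes
#   from net.imglib2.view import Views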
def run():
    t_start = datetime.now()
    image_paths = glob(os.path.join(str(import_dir.getPath()), '*tif'))

    print '\tread image metadata'
    reader = ImageReader()
    in_meta = MetadataTools.createOMEXMLMetadata()
    reader.setMetadataStore(in_meta)

    x_dims = []
    y_dims = []
    z_dims = []
    c_dims = []
    t_dims = []
    eff = []
    spp = []

    for image_path in image_paths:
        print '\t  parse %s' % (image_path)
        reader.setId(image_path)
        x_dims.append(reader.getSizeX())
        y_dims.append(reader.getSizeY())
        z_dims.append(reader.getSizeZ())
        c_dims.append(reader.getSizeC())
        t_dims.append(reader.getSizeT())
        eff.append(reader.getImageCount() / z_dims[-1] / t_dims[-1])
        spp.append(reader.getSizeC() / eff[-1])

    format = FormatTools.getPixelTypeString(reader.getPixelType())
    series = reader.getSeries()
    big_endian = Boolean.FALSE
    order = reader.getDimensionOrder()
    reader.close()

    # Compute the dimensions of the output file
    x_dim = max(x_dims)
    y_dim = max(y_dims)
    z_dim = max(z_dims)
    c_dim = max(c_dims)
    t_dim = max(t_dims)

    print '\t  series: %i' % series
    print '\t  format: %s' % format
    print '\t  dimension order: %s' % order
    print '\t  x: %s -> %i' % (x_dims, x_dim)
    print '\t  y: %s -> %i' % (y_dims, y_dim)
    print '\t  z: %s -> %i' % (z_dims, z_dim)
    print '\t  c: %s -> %i' % (c_dims, c_dim)
    print '\t  t: %s -> %i' % (t_dims, t_dim)
    print '\t  effective size c: %s' % eff
    print '\t  samples per pixel: %s' % spp

    # Get the time dimension from the number of input files
    t_dim = len(image_paths)

    # TODO: Tried to work out the order with Axes class, got something weird though.
    dimensions = [Short(x_dim), Short(y_dim), Short(c_dim), Short(z_dim)]

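    # number of pixel values in one 2D (x, y) plane; the write loop below flushes one
    # plane's worth of bytes whenever this many values have been collected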
    pixels_per_plane = x_dim * y_dim

    # Assemble the metadata for the output file
    out_meta = MetadataTools.createOMEXMLMetadata()
    out_meta.setImageID(MetadataTools.createLSID('Image', series), series)
    out_meta.setPixelsID(MetadataTools.createLSID('Pixels', series), series)
    out_meta.setPixelsBinDataBigEndian(Boolean.TRUE, 0, 0)
    out_meta.setPixelsDimensionOrder(DimensionOrder.fromString(order), series)
    out_meta.setPixelsType(PixelType.fromString(format), series)
    out_meta.setPixelsSizeX(PositiveInteger(x_dim), series)
    out_meta.setPixelsSizeY(PositiveInteger(y_dim), series)
    out_meta.setPixelsSizeZ(PositiveInteger(z_dim), series)
    out_meta.setPixelsSizeC(PositiveInteger(c_dim), series)
    out_meta.setPixelsSizeT(PositiveInteger(t_dim), series)

    for c in range(c_dim):
        out_meta.setChannelID(MetadataTools.createLSID('Channel', series, c),
                              series, c)
        out_meta.setChannelSamplesPerPixel(PositiveInteger(1), series, c)

    # Initialize the BF writer
    result_path = os.path.join(result_dir.getPath(), result_name)
    writer = ImageWriter()
    writer.setMetadataRetrieve(out_meta)
    writer.setId(result_path)
    print '\tcreated output file %s' % (result_path)

    # Write the stacks into the output file
    N = len(image_paths)
    for i, image_path in enumerate(image_paths):
        status.showStatus(i, N, "catenating %i of %i time-points" % (i, N))
        print '\t  processing %s' % (image_path)
        ds = io.open(image_path)
        xi = ds.dimensionIndex(Axes.X)
        xv = ds.dimension(xi)
        yi = ds.dimensionIndex(Axes.Y)
        yv = ds.dimension(yi)
        zi = ds.dimensionIndex(Axes.Z)
        zv = ds.dimension(zi)
        ti = ds.dimensionIndex(Axes.TIME)
        tv = ds.dimension(ti)
        ci = ds.dimensionIndex(Axes.CHANNEL)
        cv = ds.dimension(ci)

        dx = float(x_dim - xv) / 2.0
        dy = float(y_dim - yv) / 2.0
        dz = float(z_dim - zv) / 2.0
        print '\t     translation vector (dx, dy, dz) = (%f, %f, %f)' % (
            dx, dy, dz)

        if (dx != 0) or (dy != 0) or (dz != 0):
            stk = Views.translate(ds, long(dx), long(dy), long(0), long(dz))
            stk = Views.extendZero(stk)
        else:
            stk = Views.extendZero(ds.getImgPlus().getImg())

        print '\t     writing planes ',
        n = 0
        plane = 1
        byte_array = []
        interval_view = Views.interval(stk, \
                                       [Long(0), Long(0), Long(0), Long(0)], \
                                       [Long(x_dim - 1), Long(y_dim - 1), Long(c_dim - 1), Long(z_dim - 1)])
        cursor = interval_view.cursor()
        while cursor.hasNext():
            n += 1
            cursor.fwd()
            value = cursor.get().getInteger()
            bytes = DataTools.shortToBytes(value, big_endian)
            byte_array.extend(bytes)

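            # a full (x, y) plane has been accumulated: write it out (saveBytes takes a zero-based plane index)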
            if n == pixels_per_plane:
                writer.saveBytes(plane - 1, byte_array)

                print '.',
                if ((plane) % 10) == 0:
                    print '\n\t                    ',

                byte_array = []
                plane += 1
                n = 0

        print ' '

    writer.close()
    t = datetime.now() - t_start
    print '\twrote %i planes to %s in %i sec.' % (plane - 1, result_path,
                                                  t.total_seconds())
    print '... done.'