def getSplitView(session, imageIds, pixelIds, splitIndexes, channelNames,
                 mergedNames, colourChannels, mergedIndexes, mergedColours,
                 width, height, imageLabels, spacer, algorithm, stepping,
                 scalebar, overlayColour, roiZoom, roiLabel):
    """
    This method makes a figure of a number of images, arranged in rows with
    each row being the split-view of a single image. The channels are
    arranged left to right, with the combined image added on the right.
    The combined image is rendered according to current settings on the
    server, but its channels will be turned on/off according to
    @mergedIndexes.
    The figure is returned as a PIL 'Image'

    @ session        session for server access
    @ pixelIds       a list of the Ids for the pixels we want to display
    @ splitIndexes   a list of the channel indexes to display. Same channels
                     for each image/row
    @ channelNames   the Map of index:names for all channels
    @ zStart         the start of Z-range for projection
    @ zEnd           the end of Z-range for projection
    @ colourChannels the colour to make each column/ channel
    @ mergedIndexes  list or set of channels in the merged image
    @ mergedColours  index: colour dictionary of channels in the merged image
    @ width          the size in pixels to show each panel
    @ height         the size in pixels to show each panel
    @ spacer         the gap between images and around the figure.
                     Doubled between rows.
    """
    roiService = session.getRoiService()
    re = session.createRenderingEngine()
    queryService = session.getQueryService()    # only needed for movie

    # establish dimensions and roiZoom for the primary image
    # getTheseValues from the server
    rect = getRectangle(roiService, imageIds[0], roiLabel)
    if rect is None:
        # FIX: was `raise("No ROI found for the first image.")`, which
        # raises a plain string (a TypeError on Python >= 2.6).
        raise Exception("No ROI found for the first image.")
    roiX, roiY, roiWidth, roiHeight, yMin, yMax, tMin, tMax = rect

    # outline thickness for the ROI rectangle, scaled to panel size
    roiOutline = ((max(width, height)) / 200) + 1

    if roiZoom is None:
        # get the pixels for priamry image.
        pixels = queryService.get("Pixels", pixelIds[0])
        sizeY = pixels.getSizeY().getValue()
        # zoom so the primary ROI fills the panel height
        roiZoom = float(height) / float(roiHeight)
        log("ROI zoom set by primary image is %F X" % roiZoom)
    else:
        log("ROI zoom: %F X" % roiZoom)

    textGap = spacer/3
    # pick a font size proportional to the panel width
    fontsize = 12
    if width > 500:
        fontsize = 48
    elif width > 400:
        fontsize = 36
    elif width > 300:
        fontsize = 24
    elif width > 200:
        fontsize = 16
    font = imgUtil.getFont(fontsize)
    textHeight = font.getsize("Textq")[1]
    # left margin is wide enough for the longest list of row labels
    maxCount = 0
    for row in imageLabels:
        maxCount = max(maxCount, len(row))
    leftTextWidth = (textHeight + textGap) * maxCount + spacer

    maxSplitPanelWidth = 0
    totalcanvasHeight = 0
    mergedImages = []
    roiSplitPanes = []
    topSpacers = []         # space for labels above each row

    showLabelsAboveEveryRow = False
    invalidImages = []  # note any image row indexes that don't have ROIs.

    for row, pixelsId in enumerate(pixelIds):
        log("Rendering row %d" % (row))

        if showLabelsAboveEveryRow:
            showTopLabels = True
        else:
            showTopLabels = (row == 0)  # only show top labels for first row

        # need to get the roi dimensions from the server
        imageId = imageIds[row]
        roi = getRectangle(roiService, imageId, roiLabel)
        if roi is None:
            log("No Rectangle ROI found for this image")
            invalidImages.append(row)
            continue

        roiX, roiY, roiWidth, roiHeight, zMin, zMax, tStart, tEnd = roi

        pixels = queryService.get("Pixels", pixelsId)
        sizeX = pixels.getSizeX().getValue()
        sizeY = pixels.getSizeY().getValue()

        # Z-projection range comes from the ROI's Z extent
        zStart = zMin
        zEnd = zMax

        # work out if any additional zoom is needed (if the full-sized image
        # is different size from primary image)
        fullSize = (sizeX, sizeY)
        imageZoom = imgUtil.getZoomFactor(fullSize, width, height)
        if imageZoom != 1.0:
            log(" Scaling down the full-size image by a factor of %F"
                % imageZoom)

        log(" ROI location (top-left) x: %d y: %d and size width:"
            " %d height: %d" % (roiX, roiY, roiWidth, roiHeight))
        log(" ROI time: %d - %d zRange: %d - %d"
            % (tStart+1, tEnd+1, zStart+1, zEnd+1))
        # get the split pane and full merged image
        roiSplitPane, fullMergedImage, topSpacer = getROIsplitView(
            re, pixels, zStart, zEnd, splitIndexes, channelNames,
            mergedNames, colourChannels, mergedIndexes, mergedColours,
            roiX, roiY, roiWidth, roiHeight, roiZoom, tStart, spacer,
            algorithm, stepping, fontsize, showTopLabels)

        # and now zoom the full-sized merged image, add scalebar
        mergedImage = imgUtil.resizeImage(fullMergedImage, width, height)
        if scalebar:
            xIndent = spacer
            yIndent = xIndent
            # and the scale bar will be half size
            sbar = float(scalebar) / imageZoom
            if not addScalebar(sbar, xIndent, yIndent, mergedImage, pixels,
                               overlayColour):
                log(" Failed to add scale bar: Pixel size not defined or"
                    " scale bar is too large.")

        # draw ROI onto mergedImage...
        # recalculate roi if the image has been zoomed
        x = roiX / imageZoom
        y = roiY / imageZoom
        roiX2 = (roiX + roiWidth) / imageZoom
        roiY2 = (roiY + roiHeight) / imageZoom
        drawRectangle(mergedImage, x, y, roiX2, roiY2, overlayColour,
                      roiOutline)

        # note the maxWidth of zoomed panels and total height for row
        maxSplitPanelWidth = max(maxSplitPanelWidth, roiSplitPane.size[0])
        totalcanvasHeight += spacer + max(height+topSpacer,
                                          roiSplitPane.size[1])

        mergedImages.append(mergedImage)
        roiSplitPanes.append(roiSplitPane)
        topSpacers.append(topSpacer)

    # remove the labels for the invalid images (without ROIs); delete in
    # reverse so earlier deletions don't shift the later indexes
    invalidImages.reverse()
    for row in invalidImages:
        del imageLabels[row]

    # make a figure to combine all split-view rows
    # each row has 1/2 spacer above and below the panels. Need extra 1/2
    # spacer top and bottom
    canvasWidth = leftTextWidth + width + spacer + maxSplitPanelWidth + spacer
    # FIX: this line appeared commented out in the mangled source but is
    # required by Image.new() below.
    figureSize = (canvasWidth, totalcanvasHeight + spacer)
    figureCanvas = Image.new("RGB", figureSize, (255, 255, 255))

    rowY = spacer
    for row, image in enumerate(mergedImages):
        labelCanvas = getVerticalLabels(imageLabels[row], font, textGap)
        # centre the vertical label block against the merged image
        vOffset = (image.size[1] - labelCanvas.size[1]) / 2
        imgUtil.pasteImage(labelCanvas, figureCanvas, spacer/2,
                           rowY + topSpacers[row] + vOffset)
        imgUtil.pasteImage(image, figureCanvas, leftTextWidth,
                           rowY + topSpacers[row])
        x = leftTextWidth + width + spacer
        imgUtil.pasteImage(roiSplitPanes[row], figureCanvas, x, rowY)
        rowY = rowY + max(image.size[1]+topSpacers[row],
                          roiSplitPanes[row].size[1]) + spacer

    return figureCanvas
def getSplitView(conn, imageIds, pixelIds, splitIndexes, channelNames,
                 mergedNames, colourChannels, mergedIndexes, mergedColours,
                 width, height, imageLabels, spacer, algorithm, stepping,
                 scalebar, overlayColour, roiZoom, roiLabel):
    """
    This method makes a figure of a number of images, arranged in rows with
    each row being the split-view of a single image. The channels are
    arranged left to right, with the combined image added on the right.
    The combined image is rendered according to current settings on the
    server, but it's channels will be turned on/off according to
    @mergedIndexes.
    The figure is returned as a PIL 'Image'

    @ conn            connection/gateway for server access
    @ imageIds        a list of image Ids, parallel to pixelIds
    @ pixelIds        a list of the Ids for the pixels we want to display
    @ splitIndexes    a list of the channel indexes to display. Same
                      channels for each image/row
    @ channelNames    the Map of index:names for all channels
    @ colourChannels  the colour to make each column/ channel
    @ mergedIndexes   list or set of channels in the merged image
    @ mergedColours   index: colour dictionary of channels in the merged
                      image
    @ width           the size in pixels to show each panel
    @ height          the size in pixels to show each panel
    @ spacer          the gap between images and around the figure.
                      Doubled between rows.
    @ roiZoom         zoom factor for the ROI panels; if None it is derived
                      from the first image's ROI height
    @ roiLabel        label used to select which rectangle ROI to use
    """
    roiService = conn.getRoiService()
    re = conn.createRenderingEngine()
    queryService = conn.getQueryService()    # only needed for movie

    # establish dimensions and roiZoom for the primary image
    # getTheseValues from the server
    rect = getRectangle(roiService, imageIds[0], roiLabel)
    if rect is None:
        raise Exception("No ROI found for the first image.")
    roiX, roiY, roiWidth, roiHeight, yMin, yMax, tMin, tMax = rect

    # outline thickness of the ROI rectangle, scaled to panel size
    roiOutline = ((max(width, height)) / 200) + 1

    if roiZoom is None:
        # get the pixels for priamry image.
        pixels = queryService.get("Pixels", pixelIds[0])
        sizeY = pixels.getSizeY().getValue()
        # zoom chosen so the primary ROI fills the panel height
        roiZoom = float(height) / float(roiHeight)
        log("ROI zoom set by primary image is %F X" % roiZoom)
    else:
        log("ROI zoom: %F X" % roiZoom)

    textGap = spacer/3
    # font size scales with panel width
    fontsize = 12
    if width > 500:
        fontsize = 48
    elif width > 400:
        fontsize = 36
    elif width > 300:
        fontsize = 24
    elif width > 200:
        fontsize = 16
    font = imgUtil.getFont(fontsize)
    textHeight = font.getsize("Textq")[1]
    # left margin sized for the longest list of row labels
    maxCount = 0
    for row in imageLabels:
        maxCount = max(maxCount, len(row))
    leftTextWidth = (textHeight + textGap) * maxCount + spacer

    maxSplitPanelWidth = 0
    totalcanvasHeight = 0
    mergedImages = []
    roiSplitPanes = []
    topSpacers = []     # space for labels above each row

    showLabelsAboveEveryRow = False
    invalidImages = []  # note any image row indexes that don't have ROIs.

    for row, pixelsId in enumerate(pixelIds):
        log("Rendering row %d" % (row))

        if showLabelsAboveEveryRow:
            showTopLabels = True
        else:
            showTopLabels = (row == 0)  # only show top labels for first row

        # need to get the roi dimensions from the server
        imageId = imageIds[row]
        roi = getRectangle(roiService, imageId, roiLabel)
        if roi is None:
            log("No Rectangle ROI found for this image")
            invalidImages.append(row)
            continue

        roiX, roiY, roiWidth, roiHeight, zMin, zMax, tStart, tEnd = roi

        pixels = queryService.get("Pixels", pixelsId)
        sizeX = pixels.getSizeX().getValue()
        sizeY = pixels.getSizeY().getValue()

        # Z-projection range taken from the ROI's Z extent
        zStart = zMin
        zEnd = zMax

        # work out if any additional zoom is needed (if the full-sized image
        # is different size from primary image)
        fullSize = (sizeX, sizeY)
        imageZoom = imgUtil.getZoomFactor(fullSize, width, height)
        if imageZoom != 1.0:
            log(" Scaling down the full-size image by a factor of %F"
                % imageZoom)

        log(" ROI location (top-left) x: %d y: %d and size width:"
            " %d height: %d" % (roiX, roiY, roiWidth, roiHeight))
        log(" ROI time: %d - %d zRange: %d - %d"
            % (tStart+1, tEnd+1, zStart+1, zEnd+1))
        # get the split pane and full merged image
        roiSplitPane, fullMergedImage, topSpacer = getROIsplitView(
            re, pixels, zStart, zEnd, splitIndexes, channelNames,
            mergedNames, colourChannels, mergedIndexes, mergedColours,
            roiX, roiY, roiWidth, roiHeight, roiZoom, tStart, spacer,
            algorithm, stepping, fontsize, showTopLabels)

        # and now zoom the full-sized merged image, add scalebar
        mergedImage = imgUtil.resizeImage(fullMergedImage, width, height)
        if scalebar:
            xIndent = spacer
            yIndent = xIndent
            # and the scale bar will be half size
            sbar = float(scalebar) / imageZoom
            status, logMsg = figUtil.addScalebar(
                sbar, xIndent, yIndent, mergedImage, pixels, overlayColour)
            log(logMsg)

        # draw ROI onto mergedImage...
        # recalculate roi if the image has been zoomed
        x = roiX / imageZoom
        y = roiY / imageZoom
        roiX2 = (roiX + roiWidth) / imageZoom
        roiY2 = (roiY + roiHeight) / imageZoom
        drawRectangle(
            mergedImage, x, y, roiX2, roiY2, overlayColour, roiOutline)

        # note the maxWidth of zoomed panels and total height for row
        maxSplitPanelWidth = max(maxSplitPanelWidth, roiSplitPane.size[0])
        totalcanvasHeight += spacer + max(height+topSpacer,
                                          roiSplitPane.size[1])

        mergedImages.append(mergedImage)
        roiSplitPanes.append(roiSplitPane)
        topSpacers.append(topSpacer)

    # remove the labels for the invalid images (without ROIs)
    # (reverse order so earlier deletions don't shift later indexes)
    invalidImages.reverse()
    for row in invalidImages:
        del imageLabels[row]

    # make a figure to combine all split-view rows
    # each row has 1/2 spacer above and below the panels. Need extra 1/2
    # spacer top and bottom
    canvasWidth = leftTextWidth + width + spacer + maxSplitPanelWidth + spacer
    figureSize = (canvasWidth, totalcanvasHeight + spacer)
    figureCanvas = Image.new("RGB", figureSize, (255, 255, 255))

    rowY = spacer
    for row, image in enumerate(mergedImages):
        labelCanvas = figUtil.getVerticalLabels(imageLabels[row], font,
                                                textGap)
        # centre the vertical labels against the merged image
        vOffset = (image.size[1] - labelCanvas.size[1]) / 2
        imgUtil.pasteImage(labelCanvas, figureCanvas, spacer/2,
                           rowY + topSpacers[row] + vOffset)
        imgUtil.pasteImage(
            image, figureCanvas, leftTextWidth, rowY + topSpacers[row])
        x = leftTextWidth + width + spacer
        imgUtil.pasteImage(roiSplitPanes[row], figureCanvas, x, rowY)
        rowY = rowY + max(image.size[1] + topSpacers[row],
                          roiSplitPanes[row].size[1]) + spacer

    return figureCanvas
def createMovieFigure(conn, pixelIds, tIndexes, zStart, zEnd, width, height,
                      spacer, algorithm, stepping, scalebar, overlayColour,
                      timeUnits, imageLabels, maxColCount):
    """
    Makes the complete Movie figure: A canvas showing an image per row with
    multiple columns showing frames from each image/movie. Labels obove
    each frame to show the time-stamp of that frame in the specified units
    and labels on the left name each image.

    @param conn          The OMERO connection
    @param pixelIds      A list of the Pixel IDs for the images in the
                         figure
    @param tIndexes      A list of tIndexes to display frames from
    @param zStart        Projection Z-start
    @param zEnd          Projection Z-end
    @param width         Maximum width of panels
    @param height        Max height of panels
    @param spacer        Space between panels
    @param algorithm     Projection algorithm e.g. "MAXIMUMINTENSITY"
    @param stepping      Projecttion z-step
    @param scalebar      A number of microns for scale-bar
    @param overlayColour Color of the scale bar as tuple (255,255,255)
    @param timeUnits     A string such as "SECS"
    @param imageLabels   A list of lists, corresponding to pixelIds, for
                         labelling each image with one or more strings.
    @param maxColCount   Maximum number of frame columns per row before
                         wrapping to a new line of panels
    """
    mode = "RGB"
    white = (255, 255, 255)

    # create a rendering engine
    re = conn.createRenderingEngine()
    queryService = conn.getQueryService()

    rowPanels = []
    totalHeight = 0
    totalWidth = 0
    maxImageWidth = 0
    physicalSizeX = 0

    for row, pixelsId in enumerate(pixelIds):
        log("Rendering row %d" % (row))

        pixels = queryService.get("Pixels", pixelsId)
        sizeX = pixels.getSizeX().getValue()
        sizeY = pixels.getSizeY().getValue()
        sizeZ = pixels.getSizeZ().getValue()
        sizeT = pixels.getSizeT().getValue()

        if pixels.getPhysicalSizeX():
            physicalX = pixels.getPhysicalSizeX().getValue()
            unitsX = pixels.getPhysicalSizeX().getSymbol()
        else:
            physicalX = 0
            unitsX = ""
        if pixels.getPhysicalSizeY():
            physicalY = pixels.getPhysicalSizeY().getValue()
            unitsY = pixels.getPhysicalSizeY().getSymbol()
        else:
            physicalY = 0
            unitsY = ""
        log(" Pixel size: x: %s %s y: %s %s"
            % (str(physicalX), unitsX, str(physicalY), unitsY))
        if row == 0:    # set values for primary image
            physicalSizeX = physicalX
            physicalSizeY = physicalY
        else:           # compare primary image with current one
            if physicalSizeX != physicalX or physicalSizeY != physicalY:
                log(" WARNING: Images have different pixel lengths. Scales"
                    " are not comparable.")

        log(" Image dimensions (pixels): x: %d y: %d" % (sizeX, sizeY))
        maxImageWidth = max(maxImageWidth, sizeX)

        # set up rendering engine with the pixels
        re.lookupPixels(pixelsId)
        if not re.lookupRenderingDef(pixelsId):
            re.resetDefaults()
            if not re.lookupRenderingDef(pixelsId):
                # FIX: was `raise "Failed to lookup Rendering Def"` -
                # raising a plain string is a TypeError on Python >= 2.6.
                raise Exception("Failed to lookup Rendering Def")
        re.load()

        proStart = zStart
        proEnd = zEnd
        # make sure we're within Z range for projection.
        if proEnd >= sizeZ:
            proEnd = sizeZ - 1
            if proStart > sizeZ:
                proStart = 0
            log(" WARNING: Current image has fewer Z-sections than the"
                " primary image.")

        # if we have an invalid z-range (start or end less than 0), show
        # default Z only
        if proStart < 0 or proEnd < 0:
            proStart = re.getDefaultZ()
            proEnd = proStart
            log(" Display Z-section: %d" % (proEnd+1))
        else:
            log(" Projecting z range: %d - %d (max Z is %d)"
                % (proStart+1, proEnd+1, sizeZ))

        # now get each channel in greyscale (or colour)
        # a list of renderedImages (data as Strings) for the split-view row
        renderedImages = []

        for time in tIndexes:
            if time >= sizeT:
                log(" WARNING: This image does not have Time frame: %d. "
                    "(max is %d)" % (time+1, sizeT))
            else:
                if proStart != proEnd:
                    renderedImg = re.renderProjectedCompressed(
                        algorithm, time, stepping, proStart, proEnd)
                else:
                    # single Z section - render the plane directly
                    planeDef = omero.romio.PlaneDef()
                    planeDef.z = proStart
                    planeDef.t = time
                    renderedImg = re.renderCompressed(planeDef)
                # create images and resize, add to list
                image = Image.open(StringIO.StringIO(renderedImg))
                resizedImage = imgUtil.resizeImage(image, width, height)
                renderedImages.append(resizedImage)

        # make a canvas for the row of splitview images...
        # (will add time labels above each row)
        colCount = min(maxColCount, len(renderedImages))
        rowCount = int(math.ceil(float(len(renderedImages)) / colCount))
        font = imgUtil.getFont(width/12)
        fontHeight = font.getsize("Textq")[1]
        canvasWidth = ((width + spacer) * colCount) + spacer
        canvasHeight = rowCount * (spacer/2 + fontHeight + spacer + height)
        size = (canvasWidth, canvasHeight)
        # create a canvas of appropriate width, height
        canvas = Image.new(mode, size, white)

        # add text labels (time-stamp above each frame)
        queryService = conn.getQueryService()
        textX = spacer
        textY = spacer/4
        colIndex = 0
        timeLabels = figUtil.getTimeLabels(
            queryService, pixelsId, tIndexes, sizeT, timeUnits)
        for t, tIndex in enumerate(tIndexes):
            if tIndex >= sizeT:
                continue
            time = timeLabels[t]
            textW = font.getsize(time)[0]
            # centre the label over its panel
            inset = (width - textW) / 2
            textdraw = ImageDraw.Draw(canvas)
            textdraw.text((textX+inset, textY), time, font=font,
                          fill=(0, 0, 0))
            textX += width + spacer
            colIndex += 1
            if colIndex >= maxColCount:
                # wrap to the next line of panels
                colIndex = 0
                textX = spacer
                textY += (spacer/2 + fontHeight + spacer + height)

        # add scale bar to last frame...
        if scalebar:
            scaledImage = renderedImages[-1]
            xIndent = spacer
            yIndent = xIndent
            # if we've scaled to half size, zoom = 2
            zoom = imgUtil.getZoomFactor(scaledImage.size, width, height)
            # and the scale bar will be half size
            sbar = float(scalebar) / zoom
            status, logMsg = figUtil.addScalebar(
                sbar, xIndent, yIndent, scaledImage, pixels, overlayColour)
            log(logMsg)

        px = spacer
        py = spacer + fontHeight
        colIndex = 0
        # paste the images in
        for i, img in enumerate(renderedImages):
            imgUtil.pasteImage(img, canvas, px, py)
            px = px + width + spacer
            colIndex += 1
            if colIndex >= maxColCount:
                # wrap to the next line of panels
                colIndex = 0
                px = spacer
                py += (spacer/2 + fontHeight + spacer + height)

        # Add labels to the left of the panel
        canvas = addLeftLabels(canvas, imageLabels, row, width, spacer)

        # most should be same width anyway
        totalWidth = max(totalWidth, canvas.size[0])
        # add together the heights of each row
        totalHeight = totalHeight + canvas.size[1]

        rowPanels.append(canvas)

    # make a figure to combine all split-view rows
    # each row has 1/2 spacer above and below the panels. Need extra 1/2
    # spacer top and bottom
    figureSize = (totalWidth, totalHeight+spacer)
    figureCanvas = Image.new(mode, figureSize, white)

    rowY = spacer / 2
    for row in rowPanels:
        imgUtil.pasteImage(row, figureCanvas, 0, rowY)
        rowY = rowY + row.size[1]

    return figureCanvas
def getSplitView(conn, imageIds, pixelIds, mergedIndexes, mergedColours,
                 width, height, imageLabels, spacer, algorithm, stepping,
                 scalebar, overlayColour, roiZoom, maxColumns,
                 showRoiDuration, roiLabel):
    """
    This method makes a figure of a number of images, arranged in rows with
    each row being the split-view of a single image. The channels are
    arranged left to right, with the combined image added on the right.
    The combined image is rendered according to current settings on the
    server, but it's channels will be turned on/off according to
    @mergedIndexes.
    The figure is returned as a PIL 'Image'

    @ conn            connection/gateway for server access
    @ pixelIds        a list of the Ids for the pixels we want to display
    @ mergedIndexes   list or set of channels in the merged image
    @ mergedColours   index: colour dictionary of channels in the merged
                      image
    @ width           the size in pixels to show each panel
    @ height          the size in pixels to show each panel
    @ spacer          the gap between images and around the figure.
                      Doubled between rows.
    @ maxColumns      max movie-frame columns (passed to getROImovieView)
    @ showRoiDuration passed to getROImovieView (presumably toggles
                      duration labelling - confirm against that helper)
    @ roiLabel        label used to select which rectangle ROI to use
    """
    roiService = conn.getRoiService()
    re = conn.createRenderingEngine()
    queryService = conn.getQueryService()    # only needed for movie

    # establish dimensions and roiZoom for the primary image
    # getTheseValues from the server
    for iid in imageIds:
        rect = getRectangle(roiService, iid, roiLabel)
        if rect is not None:
            break

    if rect is None:
        log("Found no images with rectangle ROIs")
        return
    x, y, roiWidth, roiHeight, timeShapeMap = rect

    # outline thickness of the ROI rectangle, scaled to panel size
    roiOutline = ((max(width, height)) / 200) + 1

    if roiZoom is None:
        # get the pixels for priamry image.
        pixels = queryService.get("Pixels", pixelIds[0])
        sizeY = pixels.getSizeY().getValue()
        # zoom chosen so the primary ROI fills the panel height
        roiZoom = float(height) / float(roiHeight)
        log("ROI zoom set by primary image is %F X" % roiZoom)
    else:
        log("ROI zoom: %F X" % roiZoom)

    textGap = spacer/3
    # font size scales with panel width
    fontsize = 12
    if width > 500:
        fontsize = 48
    elif width > 400:
        fontsize = 36
    elif width > 300:
        fontsize = 24
    elif width > 200:
        fontsize = 16
    font = imgUtil.getFont(fontsize)
    textHeight = font.getsize("Textq")[1]
    # left margin sized for the longest list of row labels
    maxCount = 0
    for row in imageLabels:
        maxCount = max(maxCount, len(row))
    leftTextWidth = (textHeight + textGap) * maxCount + spacer

    maxSplitPanelWidth = 0
    totalcanvasHeight = 0
    mergedImages = []
    roiSplitPanes = []
    topSpacers = []     # space for labels above each row
    invalidImages = []  # row indexes that don't have ROIs

    for row, pixelsId in enumerate(pixelIds):
        log("Rendering row %d" % (row))

        # need to get the roi dimensions from the server
        imageId = imageIds[row]
        roi = getRectangle(roiService, imageId, roiLabel)
        if roi is None:
            log("No Rectangle ROI found for this image")
            # BUG FIX: the original did `del imageLabels[row]` here, but
            # `row` indexes the *original* list; once one entry has been
            # deleted the list shrinks and any later deletion removes the
            # wrong label. Collect the invalid rows instead and delete them
            # in reverse order after the loop (as the other figure
            # functions in this file do).
            invalidImages.append(row)
            continue
        roiX, roiY, roiWidth, roiHeight, timeShapeMap = roi

        pixels = queryService.get("Pixels", pixelsId)
        sizeX = pixels.getSizeX().getValue()
        sizeY = pixels.getSizeY().getValue()

        # work out if any additional zoom is needed (if the full-sized image
        # is different size from primary image)
        fullSize = (sizeX, sizeY)
        imageZoom = imgUtil.getZoomFactor(fullSize, width, height)
        if imageZoom != 1.0:
            log(" Scaling down the full-size image by a factor of %F"
                % imageZoom)

        log(" ROI location (top-left of first frame) x: %d y: %d and size"
            " width: %d height: %d" % (roiX, roiY, roiWidth, roiHeight))

        # get the split pane and full merged image
        roiSplitPane, fullMergedImage, topSpacer = getROImovieView(
            re, queryService, pixels, timeShapeMap, mergedIndexes,
            mergedColours, roiWidth, roiHeight, roiZoom, spacer, algorithm,
            stepping, fontsize, maxColumns, showRoiDuration)

        # and now zoom the full-sized merged image, add scalebar
        mergedImage = imgUtil.resizeImage(fullMergedImage, width, height)
        if scalebar:
            xIndent = spacer
            yIndent = xIndent
            # and the scale bar will be half size
            sbar = float(scalebar) / imageZoom
            status, logMsg = figUtil.addScalebar(
                sbar, xIndent, yIndent, mergedImage, pixels, overlayColour)
            log(logMsg)

        # draw ROI onto mergedImage...
        # recalculate roi if the image has been zoomed
        x = roiX / imageZoom
        y = roiY / imageZoom
        roiX2 = (roiX + roiWidth) / imageZoom
        roiY2 = (roiY + roiHeight) / imageZoom
        drawRectangle(
            mergedImage, x, y, roiX2, roiY2, overlayColour, roiOutline)

        # note the maxWidth of zoomed panels and total height for row
        maxSplitPanelWidth = max(maxSplitPanelWidth, roiSplitPane.size[0])
        totalcanvasHeight += spacer + max(height+topSpacer,
                                          roiSplitPane.size[1])

        mergedImages.append(mergedImage)
        roiSplitPanes.append(roiSplitPane)
        topSpacers.append(topSpacer)

    # remove the labels for the invalid images (without ROIs); reverse
    # order keeps the remaining indexes valid
    invalidImages.reverse()
    for row in invalidImages:
        del imageLabels[row]

    # make a figure to combine all split-view rows
    # each row has 1/2 spacer above and below the panels. Need extra 1/2
    # spacer top and bottom
    canvasWidth = leftTextWidth + width + spacer + maxSplitPanelWidth + spacer
    figureSize = (canvasWidth, totalcanvasHeight + spacer)
    figureCanvas = Image.new("RGB", figureSize, (255, 255, 255))

    rowY = spacer
    for row, image in enumerate(mergedImages):
        labelCanvas = figUtil.getVerticalLabels(imageLabels[row], font,
                                                textGap)
        # centre the vertical labels against the merged image
        vOffset = (image.size[1] - labelCanvas.size[1]) / 2
        imgUtil.pasteImage(labelCanvas, figureCanvas, spacer / 2,
                           rowY+topSpacers[row] + vOffset)
        imgUtil.pasteImage(
            image, figureCanvas, leftTextWidth, rowY + topSpacers[row])
        x = leftTextWidth + width + spacer
        imgUtil.pasteImage(roiSplitPanes[row], figureCanvas, x, rowY)
        rowY = rowY + max(image.size[1] + topSpacers[row],
                          roiSplitPanes[row].size[1]) + spacer

    return figureCanvas
def getSplitView(conn, pixelIds, zStart, zEnd, splitIndexes, channelNames, colourChannels, mergedIndexes, mergedColours, width=None, height=None, spacer = 12, algorithm = None, stepping = 1, scalebar = None, overlayColour=(255,255,255)): """ This method makes a figure of a number of images, arranged in rows with each row being the split-view of a single image. The channels are arranged left to right, with the combined image added on the right. The combined image is rendered according to current settings on the server, but it's channels will be turned on/off according to @mergedIndexes. No text labels are added to the image at this stage. The figure is returned as a PIL 'Image' @ session session for server access @ pixelIds a list of the Ids for the pixels we want to display @ zStart the start of Z-range for projection @ zEnd the end of Z-range for projection @ splitIndexes a list of the channel indexes to display. Same channels for each image/row @ channelNames the Map of index:names to go above the columns for each split channel @ colourChannels the colour to make each column/ channel @ mergedIndexes list or set of channels in the merged image @ mergedColours index: colour dictionary of channels in the merged image @ width the size in pixels to show each panel @ height the size in pixels to show each panel @ spacer the gap between images and around the figure. Doubled between rows. 
""" if algorithm is None: # omero::constants::projection::ProjectionType algorithm = omero.constants.projection.ProjectionType.MAXIMUMINTENSITY timepoint = 0 mode = "RGB" white = (255, 255, 255) # create a rendering engine re = conn.createRenderingEngine() queryService = conn.getQueryService() rowPanels = [] totalHeight = 0 totalWidth = 0 maxImageWidth = 0 physicalSizeX = 0 log("Split View Rendering Log...") if zStart >-1 and zEnd >-1: alString = str(algorithm).replace("INTENSITY", " Intensity").capitalize() log("All images projected using '%s' projection with step size: %d start: %d end: %d" % (alString, stepping, zStart+1, zEnd+1)) else: log("Images show last-viewed Z-section") for row, pixelsId in enumerate(pixelIds): log("Rendering row %d" % (row+1)) pixels = queryService.get("Pixels", pixelsId) sizeX = pixels.getSizeX().getValue() sizeY = pixels.getSizeY().getValue() sizeZ = pixels.getSizeZ().getValue() sizeC = pixels.getSizeC().getValue() if pixels.getPhysicalSizeX(): physicalX = pixels.getPhysicalSizeX().getValue() else: physicalX = 0 if pixels.getPhysicalSizeY(): physicalY = pixels.getPhysicalSizeY().getValue() else: physicalY = 0 log(" Pixel size (um): x: %.3f y: %.3f" % (physicalX, physicalY)) if row == 0: # set values for primary image physicalSizeX = physicalX physicalSizeY = physicalY else: # compare primary image with current one if physicalSizeX != physicalX or physicalSizeY != physicalY: log(" WARNING: Images have different pixel lengths. Scales are not comparable.") log(" Image dimensions (pixels): x: %d y: %d" % (sizeX, sizeY)) maxImageWidth = max(maxImageWidth, sizeX) # set up rendering engine with the pixels re.lookupPixels(pixelsId) if not re.lookupRenderingDef(pixelsId): re.resetDefaults() if not re.lookupRenderingDef(pixelsId): raise "Failed to lookup Rendering Def" re.load() proStart = zStart proEnd = zEnd # make sure we're within Z range for projection. 
if proEnd >= sizeZ: proEnd = sizeZ - 1 if proStart > sizeZ: proStart = 0 log(" WARNING: Current image has fewer Z-sections than the primary image.") # if we have an invalid z-range (start or end less than 0), show default Z only if proStart < 0 or proEnd < 0: proStart = re.getDefaultZ() proEnd = proStart log(" Display Z-section: %d" % (proEnd+1)) else: log(" Projecting z range: %d - %d (max Z is %d)" % (proStart+1, proEnd+1, sizeZ)) # turn on channels in mergedIndexes. for i in mergedIndexes: if i >= sizeC: channelMismatch = True else: re.setActive(i, True) if i in mergedColours: re.setRGBA(i, *mergedColours[i]) # get the combined image, using the existing rendering settings channelsString = ", ".join([channelNames[i] for i in mergedIndexes]) log(" Rendering merged channels: %s" % channelsString) if proStart != proEnd: overlay = re.renderProjectedCompressed(algorithm, timepoint, stepping, proStart, proEnd) else: planeDef = omero.romio.PlaneDef() planeDef.z = proStart planeDef.t = timepoint overlay = re.renderCompressed(planeDef) # now get each channel in greyscale (or colour) # a list of renderedImages (data as Strings) for the split-view row renderedImages = [] i = 0 channelMismatch = False # first, turn off all channels in pixels for i in range(sizeC): re.setActive(i, False) # for each channel in the splitview... for index in splitIndexes: if index >= sizeC: channelMismatch = True # can't turn channel on - simply render black square! renderedImages.append(None) else: re.setActive(index, True) # turn channel on if colourChannels: # if split channels are coloured... 
if index in mergedIndexes: # and this channel is in the combined image if index in mergedColours: rgba = tuple(mergedColours[index]) print "Setting channel to color", index, rgba re.setRGBA(index, *rgba) # set coloured else: mergedColours[index] = re.getRGBA(index) else: re.setRGBA(index,255,255,255,255) # otherwise set white (max alpha) else: re.setRGBA(index,255,255,255,255) # if not colourChannels - channels are white info = (index, re.getChannelWindowStart(index), re.getChannelWindowEnd(index)) log(" Render channel: %s start: %d end: %d" % info) if proStart != proEnd: renderedImg = re.renderProjectedCompressed(algorithm, timepoint, stepping, proStart, proEnd) else: planeDef = omero.romio.PlaneDef() planeDef.z = proStart planeDef.t = timepoint renderedImg = re.renderCompressed(planeDef) renderedImages.append(renderedImg) if index < sizeC: re.setActive(index, False) # turn the channel off again! if channelMismatch: log(" WARNING channel mismatch: The current image has fewer channels than the primary image.") # make a canvas for the row of splitview images... 
imageCount = len(renderedImages) + 1 # extra image for combined image canvasWidth = ((width + spacer) * imageCount) + spacer canvasHeight = spacer + height size = (canvasWidth, canvasHeight) canvas = Image.new(mode, size, white) # create a canvas of appropriate width, height px = spacer py = spacer/2 col = 0 # paste the images in for img in renderedImages: if img is None: im = Image.new(mode, (sizeX, sizeY), (0,0,0)) else: im = Image.open(StringIO.StringIO(img)) i = imgUtil.resizeImage(im, width, height) imgUtil.pasteImage(i, canvas, px, py) px = px + width + spacer col = col + 1 # add combined image, after resizing and adding scale bar i = Image.open(StringIO.StringIO(overlay)) scaledImage = imgUtil.resizeImage(i, width, height) if scalebar: xIndent = spacer yIndent = xIndent zoom = imgUtil.getZoomFactor(i.size, width, height) # if we've scaled to half size, zoom = 2 sbar = float(scalebar) / zoom # and the scale bar will be half size status, logMsg = figUtil.addScalebar(sbar, xIndent, yIndent, scaledImage, pixels, overlayColour) log(logMsg) imgUtil.pasteImage(scaledImage, canvas, px, py) totalWidth = max(totalWidth, canvasWidth) # most should be same width anyway totalHeight = totalHeight + canvasHeight # add together the heights of each row rowPanels.append(canvas) # make a figure to combine all split-view rows # each row has 1/2 spacer above and below the panels. Need extra 1/2 spacer top and bottom figureSize = (totalWidth, totalHeight+spacer) figureCanvas = Image.new(mode, figureSize, white) rowY = spacer/2 for row in rowPanels: imgUtil.pasteImage(row, figureCanvas, 0, rowY) rowY = rowY + row.size[1] return figureCanvas
def getSplitView(conn, pixelIds, zStart, zEnd, splitIndexes, channelNames,
                 colourChannels, mergedIndexes, mergedColours, width=None,
                 height=None, spacer=12, algorithm=None, stepping=1,
                 scalebar=None, overlayColour=(255, 255, 255)):
    """
    This method makes a figure of a number of images, arranged in rows with
    each row being the split-view of a single image. The channels are
    arranged left to right, with the combined image added on the right.
    The combined image is rendered according to current settings on the
    server, but its channels will be turned on/off according to
    @mergedIndexes. No text labels are added to the image at this stage.

    The figure is returned as a PIL 'Image'

    @param conn            connection for server access
    @param pixelIds        a list of the Ids for the pixels we want to display
    @param zStart          the start of Z-range for projection
    @param zEnd            the end of Z-range for projection
    @param splitIndexes    a list of the channel indexes to display. Same
                           channels for each image/row
    @param channelNames    the Map of index:names to go above the columns for
                           each split channel
    @param colourChannels  the colour to make each column/channel
    @param mergedIndexes   list or set of channels in the merged image
    @param mergedColours   index: colour dictionary of channels in the merged
                           image
    @param width           the size in pixels to show each panel
    @param height          the size in pixels to show each panel
    @param spacer          the gap between images and around the figure.
                           Doubled between rows.
    """
    if algorithm is None:   # omero::constants::projection::ProjectionType
        algorithm = omero.constants.projection.ProjectionType.MAXIMUMINTENSITY
    timepoint = 0
    mode = "RGB"
    white = (255, 255, 255)

    # create a rendering engine
    re = conn.createRenderingEngine()
    queryService = conn.getQueryService()

    rowPanels = []
    totalHeight = 0
    totalWidth = 0
    maxImageWidth = 0
    physicalSizeX = 0

    log("Split View Rendering Log...")

    if zStart > -1 and zEnd > -1:
        alString = str(algorithm).replace(
            "INTENSITY", " Intensity").capitalize()
        log("All images projected using '%s' projection with step size: %d"
            " start: %d end: %d" % (alString, stepping, zStart + 1, zEnd + 1))
    else:
        log("Images show last-viewed Z-section")

    for row, pixelsId in enumerate(pixelIds):
        log("Rendering row %d" % (row + 1))

        pixels = queryService.get("Pixels", pixelsId)
        sizeX = pixels.getSizeX().getValue()
        sizeY = pixels.getSizeY().getValue()
        sizeZ = pixels.getSizeZ().getValue()
        sizeC = pixels.getSizeC().getValue()

        if pixels.getPhysicalSizeX():
            physicalX = pixels.getPhysicalSizeX().getValue()
        else:
            physicalX = 0
        if pixels.getPhysicalSizeY():
            physicalY = pixels.getPhysicalSizeY().getValue()
        else:
            physicalY = 0
        log(" Pixel size (um): x: %.3f y: %.3f" % (physicalX, physicalY))
        if row == 0:    # set values for primary image
            physicalSizeX = physicalX
            physicalSizeY = physicalY
        else:           # compare primary image with current one
            if physicalSizeX != physicalX or physicalSizeY != physicalY:
                log(" WARNING: Images have different pixel lengths. Scales"
                    " are not comparable.")

        log(" Image dimensions (pixels): x: %d y: %d" % (sizeX, sizeY))
        maxImageWidth = max(maxImageWidth, sizeX)

        # set up rendering engine with the pixels
        re.lookupPixels(pixelsId)
        if not re.lookupRenderingDef(pixelsId):
            re.resetDefaults()
        if not re.lookupRenderingDef(pixelsId):
            # BUG FIX: raising a plain string is a TypeError on Python 2.6+
            raise Exception("Failed to lookup Rendering Def")
        re.load()

        proStart = zStart
        proEnd = zEnd
        # make sure we're within Z range for projection.
        if proEnd >= sizeZ:
            proEnd = sizeZ - 1
            if proStart > sizeZ:
                proStart = 0
            log(" WARNING: Current image has fewer Z-sections than the"
                " primary image.")

        # if we have an invalid z-range (start or end less than 0), show
        # default Z only
        if proStart < 0 or proEnd < 0:
            proStart = re.getDefaultZ()
            proEnd = proStart
            log("  Display Z-section: %d" % (proEnd + 1))
        else:
            log("  Projecting z range: %d - %d   (max Z is %d)"
                % (proStart + 1, proEnd + 1, sizeZ))

        # BUG FIX: this flag used to be initialised only AFTER the merged-
        # channel loop below, so a mismatch found there was silently reset.
        # Initialise it once, up front.
        channelMismatch = False

        # turn on channels in mergedIndexes.
        for i in mergedIndexes:
            if i >= sizeC:
                channelMismatch = True
            else:
                re.setActive(i, True)
                if i in mergedColours:
                    re.setRGBA(i, *mergedColours[i])

        # get the combined image, using the existing rendering settings
        channelsString = ", ".join([channelNames[i] for i in mergedIndexes])
        log("  Rendering merged channels: %s" % channelsString)
        if proStart != proEnd:
            overlay = re.renderProjectedCompressed(
                algorithm, timepoint, stepping, proStart, proEnd)
        else:
            planeDef = omero.romio.PlaneDef()
            planeDef.z = proStart
            planeDef.t = timepoint
            overlay = re.renderCompressed(planeDef)

        # now get each channel in greyscale (or colour)
        # a list of renderedImages (data as Strings) for the split-view row
        renderedImages = []

        # first, turn off all channels in pixels
        for i in range(sizeC):
            re.setActive(i, False)

        # for each channel in the splitview...
        for index in splitIndexes:
            if index >= sizeC:
                channelMismatch = True
                # can't turn channel on - simply render black square!
                renderedImages.append(None)
            else:
                re.setActive(index, True)   # turn channel on
                if colourChannels:
                    # if split channels are coloured...
                    if index in mergedIndexes:
                        # and this channel is in the combined image
                        if index in mergedColours:
                            # BUG FIX: removed stray debug 'print' here
                            rgba = tuple(mergedColours[index])
                            re.setRGBA(index, *rgba)    # set coloured
                        else:
                            mergedColours[index] = re.getRGBA(index)
                    else:
                        # otherwise set white (max alpha)
                        re.setRGBA(index, 255, 255, 255, 255)
                else:
                    # if not colourChannels - channels are white
                    re.setRGBA(index, 255, 255, 255, 255)
                info = (index, re.getChannelWindowStart(index),
                        re.getChannelWindowEnd(index))
                log("  Render channel: %s  start: %d  end: %d" % info)
                if proStart != proEnd:
                    renderedImg = re.renderProjectedCompressed(
                        algorithm, timepoint, stepping, proStart, proEnd)
                else:
                    planeDef = omero.romio.PlaneDef()
                    planeDef.z = proStart
                    planeDef.t = timepoint
                    renderedImg = re.renderCompressed(planeDef)
                renderedImages.append(renderedImg)
                if index < sizeC:
                    re.setActive(index, False)  # turn the channel off again!

        if channelMismatch:
            log(" WARNING channel mismatch: The current image has fewer"
                " channels than the primary image.")

        # make a canvas for the row of splitview images...
        imageCount = len(renderedImages) + 1    # extra image for combined
        canvasWidth = ((width + spacer) * imageCount) + spacer
        canvasHeight = spacer + height
        size = (canvasWidth, canvasHeight)
        # create a canvas of appropriate width, height
        canvas = Image.new(mode, size, white)

        px = spacer
        py = spacer / 2
        col = 0
        # paste the images in
        for img in renderedImages:
            if img is None:
                im = Image.new(mode, (sizeX, sizeY), (0, 0, 0))
            else:
                im = Image.open(StringIO.StringIO(img))
            i = imgUtil.resizeImage(im, width, height)
            imgUtil.pasteImage(i, canvas, px, py)
            px = px + width + spacer
            col = col + 1

        # add combined image, after resizing and adding scale bar
        i = Image.open(StringIO.StringIO(overlay))
        scaledImage = imgUtil.resizeImage(i, width, height)
        if scalebar:
            xIndent = spacer
            yIndent = xIndent
            # if we've scaled to half size, zoom = 2
            zoom = imgUtil.getZoomFactor(i.size, width, height)
            # and the scale bar will be half size
            sbar = float(scalebar) / zoom
            status, logMsg = figUtil.addScalebar(
                sbar, xIndent, yIndent, scaledImage, pixels, overlayColour)
            log(logMsg)

        imgUtil.pasteImage(scaledImage, canvas, px, py)

        # most should be same width anyway
        totalWidth = max(totalWidth, canvasWidth)
        # add together the heights of each row
        totalHeight = totalHeight + canvasHeight
        rowPanels.append(canvas)

    # make a figure to combine all split-view rows
    # each row has 1/2 spacer above and below the panels. Need extra 1/2
    # spacer top and bottom
    figureSize = (totalWidth, totalHeight + spacer)
    figureCanvas = Image.new(mode, figureSize, white)

    rowY = spacer / 2
    for row in rowPanels:
        imgUtil.pasteImage(row, figureCanvas, 0, rowY)
        rowY = rowY + row.size[1]

    return figureCanvas
def createMovieFigure(conn, pixelIds, tIndexes, zStart, zEnd, width, height,
                      spacer, algorithm, stepping, scalebar, overlayColour,
                      timeUnits, imageLabels, maxColCount):
    """
    Makes the complete Movie figure: A canvas showing an image per row with
    multiple columns showing frames from each image/movie. Labels above each
    frame to show the time-stamp of that frame in the specified units and
    labels on the left name each image.

    @param conn          The OMERO connection
    @param pixelIds      A list of the Pixel IDs for the images in the figure
    @param tIndexes      A list of tIndexes to display frames from
    @param zStart        Projection Z-start
    @param zEnd          Projection Z-end
    @param width         Maximum width of panels
    @param height        Max height of panels
    @param spacer        Space between panels
    @param algorithm     Projection algorithm e.g. "MAXIMUMINTENSITY"
    @param stepping      Projection z-step
    @param scalebar      A number of microns for scale-bar
    @param overlayColour Colour of the scale-bar as tuple (255,255,255)
    @param timeUnits     A string such as "SECS"
    @param imageLabels   A list of lists, corresponding to pixelIds, for
                         labelling each image with one or more strings.
    @param maxColCount   Maximum number of frame columns per row
    @return              the assembled figure as a PIL 'Image'
    """
    mode = "RGB"
    white = (255, 255, 255)

    # create a rendering engine
    re = conn.createRenderingEngine()
    queryService = conn.getQueryService()

    rowPanels = []
    totalHeight = 0
    totalWidth = 0
    maxImageWidth = 0
    physicalSizeX = 0

    for row, pixelsId in enumerate(pixelIds):
        log("Rendering row %d" % (row))

        pixels = queryService.get("Pixels", pixelsId)
        sizeX = pixels.getSizeX().getValue()
        sizeY = pixels.getSizeY().getValue()
        sizeZ = pixels.getSizeZ().getValue()
        sizeT = pixels.getSizeT().getValue()

        if pixels.getPhysicalSizeX():
            physicalX = pixels.getPhysicalSizeX().getValue()
        else:
            physicalX = 0
        if pixels.getPhysicalSizeY():
            physicalY = pixels.getPhysicalSizeY().getValue()
        else:
            physicalY = 0
        log(" Pixel size (um): x: %s y: %s"
            % (str(physicalX), str(physicalY)))
        if row == 0:    # set values for primary image
            physicalSizeX = physicalX
            physicalSizeY = physicalY
        else:           # compare primary image with current one
            if physicalSizeX != physicalX or physicalSizeY != physicalY:
                log(" WARNING: Images have different pixel lengths. Scales"
                    " are not comparable.")

        log(" Image dimensions (pixels): x: %d y: %d" % (sizeX, sizeY))
        maxImageWidth = max(maxImageWidth, sizeX)

        # set up rendering engine with the pixels
        re.lookupPixels(pixelsId)
        if not re.lookupRenderingDef(pixelsId):
            re.resetDefaults()
        if not re.lookupRenderingDef(pixelsId):
            # BUG FIX: raising a plain string is a TypeError on Python 2.6+
            raise Exception("Failed to lookup Rendering Def")
        re.load()

        proStart = zStart
        proEnd = zEnd
        # make sure we're within Z range for projection.
        if proEnd >= sizeZ:
            proEnd = sizeZ - 1
            if proStart > sizeZ:
                proStart = 0
            log(" WARNING: Current image has fewer Z-sections than the"
                " primary image.")

        # if we have an invalid z-range (start or end less than 0), show
        # default Z only
        if proStart < 0 or proEnd < 0:
            proStart = re.getDefaultZ()
            proEnd = proStart
            log("  Display Z-section: %d" % (proEnd + 1))
        else:
            log("  Projecting z range: %d - %d   (max Z is %d)"
                % (proStart + 1, proEnd + 1, sizeZ))

        # now get each channel in greyscale (or colour)
        # a list of renderedImages (data as Strings) for the split-view row
        renderedImages = []

        for time in tIndexes:
            if time >= sizeT:
                log("  WARNING: This image does not have Time frame: %d. "
                    "(max is %d)" % (time + 1, sizeT))
            else:
                if proStart != proEnd:
                    renderedImg = re.renderProjectedCompressed(
                        algorithm, time, stepping, proStart, proEnd)
                else:
                    planeDef = omero.romio.PlaneDef()
                    planeDef.z = proStart
                    planeDef.t = time
                    renderedImg = re.renderCompressed(planeDef)
                # create images and resize, add to list
                image = Image.open(StringIO.StringIO(renderedImg))
                resizedImage = imgUtil.resizeImage(image, width, height)
                renderedImages.append(resizedImage)

        # ROBUSTNESS FIX: if none of the requested T-indexes exist in this
        # image, colCount would be 0 (ZeroDivisionError below) and
        # renderedImages[-1] would IndexError - skip the row instead.
        if not renderedImages:
            log("  WARNING: No frames rendered for this image. Row skipped.")
            continue

        # make a canvas for the row of splitview images...
        # (will add time labels above each row)
        colCount = min(maxColCount, len(renderedImages))
        rowCount = int(math.ceil(float(len(renderedImages)) / colCount))
        font = imgUtil.getFont(width / 12)
        fontHeight = font.getsize("Textq")[1]
        canvasWidth = ((width + spacer) * colCount) + spacer
        canvasHeight = rowCount * (spacer / 2 + fontHeight + spacer + height)
        size = (canvasWidth, canvasHeight)
        # create a canvas of appropriate width, height
        canvas = Image.new(mode, size, white)

        # add text labels
        queryService = conn.getQueryService()
        textX = spacer
        textY = spacer / 4
        colIndex = 0
        timeLabels = figUtil.getTimeLabels(
            queryService, pixelsId, tIndexes, sizeT, timeUnits)
        for t, tIndex in enumerate(tIndexes):
            if tIndex >= sizeT:
                continue
            time = timeLabels[t]
            textW = font.getsize(time)[0]
            inset = (width - textW) / 2
            textdraw = ImageDraw.Draw(canvas)
            textdraw.text((textX + inset, textY), time, font=font,
                          fill=(0, 0, 0))
            textX += width + spacer
            colIndex += 1
            if colIndex >= maxColCount:
                colIndex = 0
                textX = spacer
                textY += (spacer / 2 + fontHeight + spacer + height)

        # add scale bar to last frame...
        if scalebar:
            scaledImage = renderedImages[-1]
            xIndent = spacer
            yIndent = xIndent
            # if we've scaled to half size, zoom = 2
            zoom = imgUtil.getZoomFactor(scaledImage.size, width, height)
            # and the scale bar will be half size
            sbar = float(scalebar) / zoom
            status, logMsg = figUtil.addScalebar(
                sbar, xIndent, yIndent, scaledImage, pixels, overlayColour)
            log(logMsg)

        px = spacer
        py = spacer + fontHeight
        colIndex = 0
        # paste the images in
        for i, img in enumerate(renderedImages):
            imgUtil.pasteImage(img, canvas, px, py)
            px = px + width + spacer
            colIndex += 1
            if colIndex >= maxColCount:
                colIndex = 0
                px = spacer
                py += (spacer / 2 + fontHeight + spacer + height)

        # Add labels to the left of the panel
        canvas = addLeftLabels(canvas, imageLabels, row, width, spacer)

        # most should be same width anyway
        totalWidth = max(totalWidth, canvas.size[0])
        # add together the heights of each row
        totalHeight = totalHeight + canvas.size[1]
        rowPanels.append(canvas)

    # make a figure to combine all split-view rows
    # each row has 1/2 spacer above and below the panels. Need extra 1/2
    # spacer top and bottom
    figureSize = (totalWidth, totalHeight + spacer)
    figureCanvas = Image.new(mode, figureSize, white)

    rowY = spacer / 2
    for row in rowPanels:
        imgUtil.pasteImage(row, figureCanvas, 0, rowY)
        rowY = rowY + row.size[1]

    return figureCanvas
def makeFrapFigure(session, commandArgs):
    """
    Main method called to make the FRAP figure.

    Analyses the 'FRAP', 'Base' and 'Whole' ellipse ROIs on the image,
    computes bleach-corrected recovery values, mobile/immobile fractions and
    T-half, then builds a figure (PNG, or a PDF including a fitted recovery
    plot when reportLab is available) and attaches it to the image.

    @param session      OMERO service factory for server access
    @param commandArgs  dict with "imageId" and optional "theC" channel index
    @return             fileID object of the child of the fileAnnotation, or
                        None if required ROIs / pre-bleach frames are missing
    """
    roiService = session.getRoiService()
    queryService = session.getQueryService()
    updateService = session.getUpdateService()
    rawFileStore = session.createRawFileStore()
    rawPixelStore = session.createRawPixelsStore()
    renderingEngine = session.createRenderingEngine()

    imageId = commandArgs["imageId"]
    theC = 0
    if "theC" in commandArgs:
        theC = commandArgs["theC"]

    image = queryService.get("Image", imageId)
    imageName = image.getName().getValue()

    query_string = ("select p from Pixels p join fetch p.image i join fetch"
                    " p.pixelsType pt where i.id='%d'" % imageId)
    pixels = queryService.findByQuery(query_string, None)
    pixelsId = pixels.getId().getValue()

    bypassOriginalFile = True
    rawPixelStore.setPixelsId(pixelsId, bypassOriginalFile)

    roiLabels = ["FRAP", "Base", "Whole"]
    roiMap = getEllipses(roiService, imageId, roiLabels)
    for label in roiLabels:
        if label not in roiMap:
            print("ROI: '%s' not found. Cannot calculate FRAP" % label)
            return
    frapMap = roiMap["FRAP"]
    baseMap = roiMap["Base"]
    wholeMap = roiMap["Whole"]

    # make a list of the t indexes that have all 3 of the Shapes we need,
    # and a list of the roiShapes for easy access.
    # BUG FIX: the original sorted tIndexes AFTER building the parallel ROI
    # lists in (arbitrary) dict-key order, desynchronising tIndexes[i] from
    # frapROI[i]/baseROI[i]/wholeROI[i]. Sort first, then build the lists.
    tIndexes = []
    for t in frapMap.keys():
        if t in baseMap and t in wholeMap:
            tIndexes.append(t)
    tIndexes.sort()
    frapROI = [frapMap[t] for t in tIndexes]
    baseROI = [baseMap[t] for t in tIndexes]
    wholeROI = [wholeMap[t] for t in tIndexes]
    log("T Indexes, " + ",".join([str(t) for t in tIndexes]))

    # get the actual plane times.
    timeMap = figUtil.getTimes(queryService, pixelsId, tIndexes, theZ=0,
                               theC=0)
    timeList = []
    for t in tIndexes:
        if t in timeMap:
            timeList.append(timeMap[t])
        else:
            # handles images which don't have PlaneInfo
            timeMap[t] = t
            timeList.append(t)
    log("Plane times (secs), " + ",".join([str(t) for t in timeList]))

    # lists of averageIntensity for the 3 ROIs
    frapValues = []
    baseValues = []
    wholeValues = []
    frapBleach = None
    theZ = 0
    for i, t in enumerate(tIndexes):
        shapes = [frapROI[i], baseROI[i], wholeROI[i]]
        theZ = frapROI[i][4]    # get theZ from the FRAP ROI
        # get a list of the average values of pixels in the three shapes.
        averages = analyseEllipses(shapes, pixels, rawPixelStore, theC, t,
                                   theZ)
        if frapBleach is None:
            frapBleach = averages[0]
        else:
            frapBleach = min(frapBleach, averages[0])
        frapValues.append(averages[0])
        baseValues.append(averages[1])
        wholeValues.append(averages[2])

    log("FRAP Values, " + ",".join([str(v) for v in frapValues]))
    log("Base Values, " + ",".join([str(v) for v in baseValues]))
    log("Whole Values, " + ",".join([str(v) for v in wholeValues]))

    # find the time of the bleach event (lowest intensity)
    tBleach = frapValues.index(frapBleach)
    log("Pre-bleach frames, %d" % tBleach)
    if tBleach == 0:
        print("No pre-bleach images. Can't calculate FRAP")
        return

    # using frames before and after tBleach - calculate bleach ranges etc.
    frapPre = average(frapValues[:tBleach]) - average(baseValues[:tBleach])
    wholePre = average(wholeValues[:tBleach]) - average(baseValues[:tBleach])
    wholePost = (average(wholeValues[tBleach:])
                 - average(baseValues[tBleach:]))

    # use these values to get a ratio of FRAP intensity / pre-Bleach
    # intensity (corrected by intensity of 'Whole' ROI)
    frapNormCorr = []
    for i in range(len(tIndexes)):
        frapNormCorr.append(
            (float(frapValues[i] - baseValues[i]) / frapPre)
            * (wholePre / float(wholeValues[i] - baseValues[i])))
    log("FRAP Corrected, " + ",".join([str(v) for v in frapNormCorr]))

    # work out the range of recovery (bleach -> plateau) and the time to
    # reach half of this after bleach.
    frapBleachNormCorr = frapNormCorr[tBleach]
    plateauNormCorr = average(frapNormCorr[-5:])
    plateauMinusBleachNormCorr = plateauNormCorr - frapBleachNormCorr
    mobileFraction = (plateauMinusBleachNormCorr
                      / float(1 - frapBleachNormCorr))
    immobileFraction = 1 - mobileFraction
    halfMaxNormCorr = plateauMinusBleachNormCorr / 2 + frapBleachNormCorr
    log("Corrected Bleach Intensity, %f" % frapBleachNormCorr)
    log("Corrected Plateau Intensity, %f" % plateauNormCorr)
    log("Plateau - Bleach, %f" % plateauMinusBleachNormCorr)
    log("Mobile Fraction, %f" % mobileFraction)
    log("Immobile Fraction, %f" % immobileFraction)
    log("Half Recovered Intensity, %f" % halfMaxNormCorr)

    # Define the T-half for this FRAP. In place of fitting an exact curve to
    # the data, find the two time-points that the half Max of recovery sits
    # between and find the T-half using a linear approximation between these
    # two points.
    # BUG FIX: the original indexed frapNormCorr/timeList with the raw
    # T-index *value* (frapNormCorr[t], th = tIndexes[t]) which is only
    # correct when the T-indexes happen to be 0,1,2,... Use the list
    # position instead.
    th = None
    for pos in range(tBleach, len(tIndexes)):
        if halfMaxNormCorr < frapNormCorr[pos]:
            th = pos
            break
    # ROBUSTNESS FIX: if recovery never reaches half-max, th stays None and
    # the arithmetic below would raise a TypeError.
    if th is None:
        print("Half-max recovery not reached. Can't calculate T-half")
        return

    y1 = frapNormCorr[th - 1]
    y2 = frapNormCorr[th]
    x1 = timeList[th - 1]
    x2 = timeList[th]
    m1 = (y2 - y1) / (x2 - x1)      # gradient of the line
    c1 = y1 - m1 * x1               # Y-intercept
    tHalf = (halfMaxNormCorr - c1) / m1 - timeList[tBleach]
    log("Bleach time, %f seconds" % timeList[tBleach])
    log("T-Half, %f seconds" % tHalf)

    figLegend = "\n".join(logLines)
    print(figLegend)

    # make a PIL image of the last frame before FRAP, the bleach frame and
    # the final (recovery) frame
    spacer = 5
    frames = []
    ellipses = [frapROI[tBleach - 1], frapROI[tBleach], frapROI[-1]]
    frames.append(getPlaneImage(renderingEngine, pixelsId, theZ,
                                tIndexes[tBleach - 1]))
    frames.append(getPlaneImage(renderingEngine, pixelsId, theZ,
                                tIndexes[tBleach]))
    frames.append(getPlaneImage(renderingEngine, pixelsId, theZ,
                                tIndexes[-1]))
    figW = 450
    font = imgUtil.getFont(16)
    fontH = font.getsize("FRAP")[1]
    labels = ["Pre-Bleach", "Bleach", "Recovery"]
    imgW = (figW - (2 * spacer)) / len(frames)
    # shrink the images by width, or maintain height if shrink not needed.
    smallImages = [imgUtil.resizeImage(img, imgW, img.size[1])
                   for img in frames]
    # BUG FIX: the original read the comprehension-leaked variable 'img'
    # here (Python 2 only leak, i.e. the last frame). Be explicit, and use
    # float division. Assumes all frames are the same size - TODO confirm.
    zoomOut = 1.0 / imgUtil.getZoomFactor(frames[-1].size, imgW,
                                          frames[-1].size[1])

    figH = smallImages[0].size[1] + spacer + fontH
    frapCanvas = Image.new("RGB", (figW, figH), (255, 255, 255))
    draw = ImageDraw.Draw(frapCanvas)
    y = spacer + fontH
    x = 0
    for l, img in enumerate(frames):
        label = labels[l]
        indent = (imgW - font.getsize(label)[0]) / 2
        draw.text((x + indent, 0), label, font=font, fill=(0, 0, 0))
        roiImage = addEllipse(smallImages[l], ellipses[l], zoomOut)
        imgUtil.pasteImage(roiImage, frapCanvas, x, y)
        x += spacer + imgW

    fileName = imageName + ".png"
    frapCanvas.save(fileName, "PNG")
    # NOTE: renamed from 'format' to avoid shadowing the builtin
    fileFormat = PNG
    output = fileName

    # if reportLab has imported...
    if reportLab:
        # we are going to export a PDF, not a JPEG
        fileFormat = PDF
        output = imageName + ".pdf"

        # create a plot of curve fitted to: y = 1 - e(It)
        # where thalf = ln 0.5 / -I
        # http://www.embl.de/eamnet/frap/html/halftime.html
        import math
        i = 1 / float(tHalf) * math.log(0.5)
        fittedPoints = []
        for t in timeList[3:]:
            f = (frapBleachNormCorr
                 + ((plateauNormCorr - frapBleachNormCorr)
                    * (1 - math.exp(t * i))))
            fittedPoints.append(f)
        log("Fitted: , " + str(fittedPoints))

        # create a plot of the FRAP data
        figHeight = 450
        figWidth = 400
        drawing = Drawing(figWidth, figHeight)
        lp = LinePlot()
        lp.x = 50
        lp.y = 50
        lp.height = 300
        lp.width = 300
        lp.data = [zip(timeList, frapNormCorr),
                   zip(timeList[3:], fittedPoints)]
        lp.lines[0].strokeColor = colors.red
        lp.lines[0].symbol = makeMarker('Circle')
        lp.lines[1].strokeColor = colors.green
        lp.lines[1].symbol = makeMarker('Circle')
        drawing.add(lp)
        drawing.add(String(200, 25, 'Time (seconds)', fontSize=12,
                           textAnchor="middle"))
        drawing.add(String(200, figHeight - 25, imageName, fontSize=12,
                           textAnchor="middle"))
        drawing.add(String(200, figHeight - 50, 'T(1/2) = %f' % tHalf,
                           fontSize=12, textAnchor="middle"))

        # create an A4 canvas to make the pdf figure
        figCanvas = canvas.Canvas(output, pagesize=A4)
        pasteX = 100
        pasteY = 75
        # add the FRAP image
        figCanvas.drawImage(fileName, pasteX - 25, pasteY)
        # add the FRAP data plot
        renderPDF.draw(drawing, figCanvas, pasteX, 300, showBoundary=True)
        figCanvas.save()

    fileId = scriptUtil.uploadAndAttachFile(queryService, updateService,
                                            rawFileStore, image, output,
                                            fileFormat, figLegend)
    return fileId