def getSplitView(conn, imageIds, pixelIds, splitIndexes, channelNames,
                 mergedNames, colourChannels, mergedIndexes, mergedColours,
                 width, height, imageLabels, spacer, algorithm, stepping,
                 scalebar, overlayColour, roiZoom, roiLabel):
    """
    Makes a figure of a number of images, arranged in rows, with each row
    being the split-view of a single image. The channels are arranged left
    to right, with the combined image added on the right.
    The combined image is rendered according to current settings on the
    server, but its channels will be turned on/off according to
    @mergedIndexes.
    Each row also shows a zoomed split-view of the image's rectangle ROI
    (located via roiLabel) beside the full merged image.
    The figure is returned as a PIL 'Image'

    @ conn              session for server access
    @ imageIds          a list of Image IDs (parallel to pixelIds), used to
                        look up each image's ROI
    @ pixelIds          a list of the Ids for the pixels we want to display
    @ splitIndexes      a list of the channel indexes to display. Same
                        channels for each image/row
    @ channelNames      the Map of index:names for all channels
    @ mergedNames       if true, label the merged panel with channel names
                        (passed through to getROIsplitView)
    @ colourChannels    the colour to make each column/channel
    @ mergedIndexes     list or set of channels in the merged image
    @ mergedColours     index: colour dictionary of channels in the merged
                        image
    @ width             the size in pixels to show each panel
    @ height            the size in pixels to show each panel
    @ imageLabels       list of lists of label strings, one list per row.
                        NOTE: mutated in place - labels for images without
                        ROIs are deleted
    @ spacer            the gap between images and around the figure.
                        Doubled between rows
    @ algorithm         projection algorithm (passed to getROIsplitView)
    @ stepping          projection z-step (passed to getROIsplitView)
    @ scalebar          length of scalebar in microns, or None/0 to omit
    @ overlayColour     colour for scalebar and ROI outline
    @ roiZoom           zoom factor for the ROI panels; None means derive
                        from the primary image's ROI height
    @ roiLabel          label used to locate the rectangle ROI on each image
    """
    roiService = conn.getRoiService()
    re = conn.createRenderingEngine()
    queryService = conn.getQueryService()  # only needed for movie

    # establish dimensions and roiZoom for the primary image
    # getTheseValues from the server
    rect = getRectangle(roiService, imageIds[0], roiLabel)
    if rect is None:
        raise Exception("No ROI found for the first image.")
    # NOTE(review): positions 5-6 look like a Z-range (the per-row unpack
    # below names them zMin/zMax) - verify against getRectangle
    roiX, roiY, roiWidth, roiHeight, yMin, yMax, tMin, tMax = rect

    # outline thickness scales with panel size (min 1 px)
    roiOutline = ((max(width, height)) / 200) + 1

    if roiZoom is None:
        # get the pixels for priamry image.
        pixels = queryService.get("Pixels", pixelIds[0])
        sizeY = pixels.getSizeY().getValue()  # NOTE(review): unused
        # zoom so that the primary ROI fills the panel height
        roiZoom = float(height) / float(roiHeight)
        log("ROI zoom set by primary image is %F X" % roiZoom)
    else:
        log("ROI zoom: %F X" % roiZoom)

    textGap = spacer/3
    # font size steps up with panel width
    fontsize = 12
    if width > 500:
        fontsize = 48
    elif width > 400:
        fontsize = 36
    elif width > 300:
        fontsize = 24
    elif width > 200:
        fontsize = 16
    font = imgUtil.getFont(fontsize)
    textHeight = font.getsize("Textq")[1]
    # left margin is wide enough for the longest vertical label stack
    maxCount = 0
    for row in imageLabels:
        maxCount = max(maxCount, len(row))
    leftTextWidth = (textHeight + textGap) * maxCount + spacer

    maxSplitPanelWidth = 0
    totalcanvasHeight = 0
    mergedImages = []
    roiSplitPanes = []
    topSpacers = []  # space for labels above each row
    showLabelsAboveEveryRow = False
    invalidImages = []  # note any image row indexes that don't have ROIs.

    for row, pixelsId in enumerate(pixelIds):
        log("Rendering row %d" % (row))

        if showLabelsAboveEveryRow:
            showTopLabels = True
        else:
            showTopLabels = (row == 0)  # only show top labels for first row

        # need to get the roi dimensions from the server
        imageId = imageIds[row]
        roi = getRectangle(roiService, imageId, roiLabel)
        if roi is None:
            # skip this image entirely; remembered so its labels can be
            # removed after the loop
            log("No Rectangle ROI found for this image")
            invalidImages.append(row)
            continue

        roiX, roiY, roiWidth, roiHeight, zMin, zMax, tStart, tEnd = roi

        pixels = queryService.get("Pixels", pixelsId)
        sizeX = pixels.getSizeX().getValue()
        sizeY = pixels.getSizeY().getValue()

        # each row projects over its own ROI's Z-range
        zStart = zMin
        zEnd = zMax

        # work out if any additional zoom is needed (if the full-sized image
        # is different size from primary image)
        fullSize = (sizeX, sizeY)
        imageZoom = imgUtil.getZoomFactor(fullSize, width, height)
        if imageZoom != 1.0:
            log(" Scaling down the full-size image by a factor of %F"
                % imageZoom)

        log(" ROI location (top-left) x: %d y: %d and size width:"
            " %d height: %d" % (roiX, roiY, roiWidth, roiHeight))
        log(" ROI time: %d - %d zRange: %d - %d"
            % (tStart+1, tEnd+1, zStart+1, zEnd+1))
        # get the split pane and full merged image
        roiSplitPane, fullMergedImage, topSpacer = getROIsplitView(
            re, pixels, zStart, zEnd, splitIndexes, channelNames,
            mergedNames, colourChannels, mergedIndexes, mergedColours,
            roiX, roiY, roiWidth, roiHeight, roiZoom, tStart, spacer,
            algorithm, stepping, fontsize, showTopLabels)

        # and now zoom the full-sized merged image, add scalebar
        mergedImage = imgUtil.resizeImage(fullMergedImage, width, height)
        if scalebar:
            xIndent = spacer
            yIndent = xIndent
            # and the scale bar will be half size
            sbar = float(scalebar) / imageZoom
            status, logMsg = figUtil.addScalebar(
                sbar, xIndent, yIndent, mergedImage, pixels, overlayColour)
            log(logMsg)

        # draw ROI onto mergedImage...
        # recalculate roi if the image has been zoomed
        x = roiX / imageZoom
        y = roiY / imageZoom
        roiX2 = (roiX + roiWidth) / imageZoom
        roiY2 = (roiY + roiHeight) / imageZoom
        drawRectangle(
            mergedImage, x, y, roiX2, roiY2, overlayColour, roiOutline)

        # note the maxWidth of zoomed panels and total height for row
        maxSplitPanelWidth = max(maxSplitPanelWidth, roiSplitPane.size[0])
        totalcanvasHeight += spacer + max(height+topSpacer,
                                          roiSplitPane.size[1])

        mergedImages.append(mergedImage)
        roiSplitPanes.append(roiSplitPane)
        topSpacers.append(topSpacer)

    # remove the labels for the invalid images (without ROIs)
    # iterate in reverse so earlier indexes stay valid while deleting
    invalidImages.reverse()
    for row in invalidImages:
        del imageLabels[row]

    # make a figure to combine all split-view rows
    # each row has 1/2 spacer above and below the panels. Need extra 1/2
    # spacer top and bottom
    canvasWidth = leftTextWidth + width + spacer + maxSplitPanelWidth + spacer
    figureSize = (canvasWidth, totalcanvasHeight + spacer)
    figureCanvas = Image.new("RGB", figureSize, (255, 255, 255))

    rowY = spacer
    for row, image in enumerate(mergedImages):
        # vertical labels in the left margin, centred against the merged
        # image panel
        labelCanvas = figUtil.getVerticalLabels(imageLabels[row], font,
                                                textGap)
        vOffset = (image.size[1] - labelCanvas.size[1]) / 2
        imgUtil.pasteImage(labelCanvas, figureCanvas, spacer/2,
                           rowY + topSpacers[row] + vOffset)
        imgUtil.pasteImage(
            image, figureCanvas, leftTextWidth, rowY + topSpacers[row])
        x = leftTextWidth + width + spacer
        imgUtil.pasteImage(roiSplitPanes[row], figureCanvas, x, rowY)
        # advance by whichever is taller: merged panel (+ its label
        # spacer) or the ROI split pane
        rowY = rowY + max(image.size[1] + topSpacers[row],
                          roiSplitPanes[row].size[1]) + spacer

    return figureCanvas
def createMovieFigure(conn, pixelIds, tIndexes, zStart, zEnd, width, height,
                      spacer, algorithm, stepping, scalebar, overlayColour,
                      timeUnits, imageLabels, maxColCount):
    """
    Makes the complete Movie figure: A canvas showing an image per row with
    multiple columns showing frames from each image/movie. Labels obove each
    frame to show the time-stamp of that frame in the specified units and
    labels on the left name each image.

    @param conn          The OMERO session
    @param pixelIds      A list of the Pixel IDs for the images in the figure
    @param tIndexes      A list of tIndexes to display frames from
    @param zStart        Projection Z-start
    @param zEnd          Projection Z-end
    @param width         Maximum width of panels
    @param height        Max height of panels
    @param spacer        Space between panels
    @param algorithm     Projection algorithm e.g. "MAXIMUMINTENSITY"
    @param stepping      Projection z-step
    @param scalebar      A number of microns for scale-bar
    @param overlayColour Color of the scale bar as tuple (255,255,255)
    @param timeUnits     A string such as "SECS"
    @param imageLabels   A list of lists, corresponding to pixelIds, for
                         labelling each image with one or more strings.
    @param maxColCount   Maximum number of frame columns per row
    @return              PIL Image of the complete figure
    @raise Exception     if a rendering def cannot be looked up for an image
    """
    mode = "RGB"
    white = (255, 255, 255)

    # create a rendering engine
    re = conn.createRenderingEngine()
    queryService = conn.getQueryService()

    rowPanels = []
    totalHeight = 0
    totalWidth = 0
    maxImageWidth = 0
    physicalSizeX = 0

    for row, pixelsId in enumerate(pixelIds):
        log("Rendering row %d" % (row))

        pixels = queryService.get("Pixels", pixelsId)
        sizeX = pixels.getSizeX().getValue()
        sizeY = pixels.getSizeY().getValue()
        sizeZ = pixels.getSizeZ().getValue()
        sizeT = pixels.getSizeT().getValue()

        if pixels.getPhysicalSizeX():
            physicalX = pixels.getPhysicalSizeX().getValue()
            unitsX = pixels.getPhysicalSizeX().getSymbol()
        else:
            physicalX = 0
            unitsX = ""
        if pixels.getPhysicalSizeY():
            physicalY = pixels.getPhysicalSizeY().getValue()
            unitsY = pixels.getPhysicalSizeY().getSymbol()
        else:
            physicalY = 0
            unitsY = ""
        log(" Pixel size: x: %s %s y: %s %s"
            % (str(physicalX), unitsX, str(physicalY), unitsY))
        if row == 0:    # set values for primary image
            physicalSizeX = physicalX
            physicalSizeY = physicalY
        else:           # compare primary image with current one
            if physicalSizeX != physicalX or physicalSizeY != physicalY:
                log(" WARNING: Images have different pixel lengths. Scales"
                    " are not comparable.")

        log(" Image dimensions (pixels): x: %d y: %d" % (sizeX, sizeY))
        maxImageWidth = max(maxImageWidth, sizeX)

        # set up rendering engine with the pixels
        re.lookupPixels(pixelsId)
        if not re.lookupRenderingDef(pixelsId):
            re.resetDefaults()
        if not re.lookupRenderingDef(pixelsId):
            # FIX: was `raise "..."` - string exceptions are invalid
            raise Exception("Failed to lookup Rendering Def")
        re.load()

        proStart = zStart
        proEnd = zEnd
        # make sure we're within Z range for projection.
        if proEnd >= sizeZ:
            proEnd = sizeZ - 1
            if proStart > sizeZ:
                proStart = 0
            log(" WARNING: Current image has fewer Z-sections than the"
                " primary image.")

        # if we have an invalid z-range (start or end less than 0), show
        # default Z only
        if proStart < 0 or proEnd < 0:
            proStart = re.getDefaultZ()
            proEnd = proStart
            log(" Display Z-section: %d" % (proEnd+1))
        else:
            log(" Projecting z range: %d - %d (max Z is %d)"
                % (proStart+1, proEnd+1, sizeZ))

        # now get each channel in greyscale (or colour)
        # a list of renderedImages (data as Strings) for the split-view row
        renderedImages = []

        for time in tIndexes:
            if time >= sizeT:
                log(" WARNING: This image does not have Time frame: %d. "
                    "(max is %d)" % (time+1, sizeT))
            else:
                if proStart != proEnd:
                    renderedImg = re.renderProjectedCompressed(
                        algorithm, time, stepping, proStart, proEnd)
                else:
                    # single Z-section: render the plane directly
                    planeDef = omero.romio.PlaneDef()
                    planeDef.z = proStart
                    planeDef.t = time
                    renderedImg = re.renderCompressed(planeDef)
                # create images and resize, add to list
                image = Image.open(StringIO.StringIO(renderedImg))
                resizedImage = imgUtil.resizeImage(image, width, height)
                renderedImages.append(resizedImage)

        # make a canvas for the row of splitview images...
        # (will add time labels above each row)
        colCount = min(maxColCount, len(renderedImages))
        rowCount = int(math.ceil(float(len(renderedImages)) / colCount))
        font = imgUtil.getFont(width/12)
        fontHeight = font.getsize("Textq")[1]
        canvasWidth = ((width + spacer) * colCount) + spacer
        canvasHeight = rowCount * (spacer/2 + fontHeight + spacer + height)
        size = (canvasWidth, canvasHeight)
        # create a canvas of appropriate width, height
        canvas = Image.new(mode, size, white)

        # add text labels (time-stamp above each frame)
        queryService = conn.getQueryService()
        textX = spacer
        textY = spacer/4
        colIndex = 0
        timeLabels = figUtil.getTimeLabels(
            queryService, pixelsId, tIndexes, sizeT, timeUnits)
        for t, tIndex in enumerate(tIndexes):
            if tIndex >= sizeT:
                continue
            time = timeLabels[t]
            textW = font.getsize(time)[0]
            inset = (width - textW) / 2  # centre label over the frame
            textdraw = ImageDraw.Draw(canvas)
            textdraw.text((textX+inset, textY), time, font=font,
                          fill=(0, 0, 0))
            textX += width + spacer
            colIndex += 1
            if colIndex >= maxColCount:
                # wrap to the next row of frames
                colIndex = 0
                textX = spacer
                textY += (spacer/2 + fontHeight + spacer + height)

        # add scale bar to last frame...
        if scalebar:
            scaledImage = renderedImages[-1]
            xIndent = spacer
            yIndent = xIndent
            # if we've scaled to half size, zoom = 2
            zoom = imgUtil.getZoomFactor(scaledImage.size, width, height)
            # and the scale bar will be half size
            sbar = float(scalebar) / zoom
            status, logMsg = figUtil.addScalebar(
                sbar, xIndent, yIndent, scaledImage, pixels, overlayColour)
            log(logMsg)

        px = spacer
        py = spacer + fontHeight
        colIndex = 0
        # paste the images in
        for i, img in enumerate(renderedImages):
            imgUtil.pasteImage(img, canvas, px, py)
            px = px + width + spacer
            colIndex += 1
            if colIndex >= maxColCount:
                colIndex = 0
                px = spacer
                py += (spacer/2 + fontHeight + spacer + height)

        # Add labels to the left of the panel
        canvas = addLeftLabels(canvas, imageLabels, row, width, spacer)

        # most should be same width anyway
        totalWidth = max(totalWidth, canvas.size[0])
        # add together the heights of each row
        totalHeight = totalHeight + canvas.size[1]

        rowPanels.append(canvas)

    # make a figure to combine all split-view rows
    # each row has 1/2 spacer above and below the panels. Need extra 1/2
    # spacer top and bottom
    figureSize = (totalWidth, totalHeight+spacer)
    figureCanvas = Image.new(mode, figureSize, white)

    rowY = spacer / 2
    for row in rowPanels:
        imgUtil.pasteImage(row, figureCanvas, 0, rowY)
        rowY = rowY + row.size[1]

    return figureCanvas
def createmovie_figure(conn, pixel_ids, t_indexes, z_start, z_end, width,
                       height, spacer, algorithm, stepping, scalebar,
                       overlay_colour, time_units, image_labels,
                       max_col_count):
    """
    Makes the complete Movie figure: A canvas showing an image per row with
    multiple columns showing frames from each image/movie. Labels obove each
    frame to show the time-stamp of that frame in the specified units and
    labels on the left name each image.

    @param conn           The OMERO session
    @param pixel_ids      A list of the Pixel IDs for the images in the
                          figure
    @param t_indexes      A list of tIndexes to display frames from
    @param z_start        Projection Z-start
    @param z_end          Projection Z-end
    @param width          Maximum width of panels
    @param height         Max height of panels
    @param spacer         Space between panels
    @param algorithm      Projection algorithm e.g. "MAXIMUMINTENSITY"
    @param stepping       Projection z-step
    @param scalebar       A number of microns for scale-bar
    @param overlay_colour Color of the scale bar as tuple (255,255,255)
    @param time_units     A string such as "SECS"
    @param image_labels   A list of lists, corresponding to pixelIds, for
                          labelling each image with one or more strings.
    @param max_col_count  Maximum number of frame columns per row
    @return               PIL Image of the complete figure
    @raise Exception      if a rendering def cannot be looked up for an image
    """
    mode = "RGB"
    white = (255, 255, 255)

    # create a rendering engine
    re = conn.createRenderingEngine()
    query_service = conn.getQueryService()

    row_panels = []
    total_height = 0
    total_width = 0
    max_image_width = 0
    physical_size_x = 0

    for row, pixels_id in enumerate(pixel_ids):
        log("Rendering row %d" % (row))

        pixels = query_service.get("Pixels", pixels_id)
        size_x = pixels.getSizeX().getValue()
        size_y = pixels.getSizeY().getValue()
        size_z = pixels.getSizeZ().getValue()
        size_t = pixels.getSizeT().getValue()

        if pixels.getPhysicalSizeX():
            physical_x = pixels.getPhysicalSizeX().getValue()
            units_x = pixels.getPhysicalSizeX().getSymbol()
        else:
            physical_x = 0
            units_x = ""
        if pixels.getPhysicalSizeY():
            physical_y = pixels.getPhysicalSizeY().getValue()
            units_y = pixels.getPhysicalSizeY().getSymbol()
        else:
            physical_y = 0
            units_y = ""
        log(" Pixel size: x: %s %s y: %s %s"
            % (str(physical_x), units_x, str(physical_y), units_y))
        if row == 0:    # set values for primary image
            physical_size_x = physical_x
            physical_size_y = physical_y
        else:           # compare primary image with current one
            if physical_size_x != physical_x or physical_size_y != physical_y:
                log(" WARNING: Images have different pixel lengths. Scales"
                    " are not comparable.")

        log(" Image dimensions (pixels): x: %d y: %d" % (size_x, size_y))
        max_image_width = max(max_image_width, size_x)

        # set up rendering engine with the pixels
        re.lookupPixels(pixels_id)
        if not re.lookupRenderingDef(pixels_id):
            re.resetDefaults()
        if not re.lookupRenderingDef(pixels_id):
            # FIX: was `raise "..."` - string exceptions are invalid
            raise Exception("Failed to lookup Rendering Def")
        re.load()

        pro_start = z_start
        pro_end = z_end
        # make sure we're within Z range for projection.
        if pro_end >= size_z:
            pro_end = size_z - 1
            if pro_start > size_z:
                pro_start = 0
            log(" WARNING: Current image has fewer Z-sections than the"
                " primary image.")

        # if we have an invalid z-range (start or end less than 0), show
        # default Z only
        if pro_start < 0 or pro_end < 0:
            pro_start = re.getDefaultZ()
            pro_end = pro_start
            log(" Display Z-section: %d" % (pro_end + 1))
        else:
            log(" Projecting z range: %d - %d (max Z is %d)"
                % (pro_start + 1, pro_end + 1, size_z))

        # now get each channel in greyscale (or colour)
        # a list of renderedImages (data as Strings) for the split-view row
        rendered_images = []

        for time in t_indexes:
            if time >= size_t:
                log(" WARNING: This image does not have Time frame: %d. "
                    "(max is %d)" % (time + 1, size_t))
            else:
                if pro_start != pro_end:
                    rendered_img = re.renderProjectedCompressed(
                        algorithm, time, stepping, pro_start, pro_end)
                else:
                    # single Z-section: render the plane directly
                    plane_def = omero.romio.PlaneDef()
                    plane_def.z = pro_start
                    plane_def.t = time
                    rendered_img = re.renderCompressed(plane_def)
                # create images and resize, add to list
                image = Image.open(io.BytesIO(rendered_img))
                resized_image = image_utils.resize_image(image, width,
                                                         height)
                rendered_images.append(resized_image)

        # make a canvas for the row of splitview images...
        # (will add time labels above each row)
        col_count = min(max_col_count, len(rendered_images))
        row_count = int(math.ceil(float(len(rendered_images)) / col_count))
        font = image_utils.get_font(width / 12)
        font_height = font.getsize("Textq")[1]
        canvas_width = ((width + spacer) * col_count) + spacer
        canvas_height = row_count * (spacer / 2 + font_height + spacer
                                     + height)
        size = (canvas_width, canvas_height)
        # create a canvas of appropriate width, height
        canvas = Image.new(mode, size, white)

        # add text labels (time-stamp above each frame)
        query_service = conn.getQueryService()
        text_x = spacer
        text_y = spacer / 4
        col_index = 0
        time_labels = figUtil.getTimeLabels(query_service, pixels_id,
                                            t_indexes, size_t, time_units)
        for t, t_index in enumerate(t_indexes):
            if t_index >= size_t:
                continue
            time = time_labels[t]
            text_w = font.getsize(time)[0]
            inset = (width - text_w) / 2  # centre label over the frame
            textdraw = ImageDraw.Draw(canvas)
            textdraw.text((text_x + inset, text_y), time, font=font,
                          fill=(0, 0, 0))
            text_x += width + spacer
            col_index += 1
            if col_index >= max_col_count:
                # wrap to the next row of frames
                col_index = 0
                text_x = spacer
                text_y += (spacer / 2 + font_height + spacer + height)

        # add scale bar to last frame...
        if scalebar:
            scaled_image = rendered_images[-1]
            x_indent = spacer
            y_indent = x_indent
            # if we've scaled to half size, zoom = 2
            zoom = image_utils.get_zoom_factor(scaled_image.size, width,
                                               height)
            # and the scale bar will be half size
            sbar = float(scalebar) / zoom
            status, log_msg = figUtil.addScalebar(sbar, x_indent, y_indent,
                                                  scaled_image, pixels,
                                                  overlay_colour)
            log(log_msg)

        px = spacer
        py = spacer + font_height
        col_index = 0
        # paste the images in
        for i, img in enumerate(rendered_images):
            image_utils.paste_image(img, canvas, px, py)
            px = px + width + spacer
            col_index += 1
            if col_index >= max_col_count:
                col_index = 0
                px = spacer
                py += (spacer / 2 + font_height + spacer + height)

        # Add labels to the left of the panel
        canvas = add_left_labels(canvas, image_labels, row, width, spacer)

        # most should be same width anyway
        total_width = max(total_width, canvas.size[0])
        # add together the heights of each row
        total_height = total_height + canvas.size[1]

        row_panels.append(canvas)

    # make a figure to combine all split-view rows
    # each row has 1/2 spacer above and below the panels. Need extra 1/2
    # spacer top and bottom
    figure_size = (total_width, total_height + spacer)
    figure_canvas = Image.new(mode, figure_size, white)

    row_y = spacer / 2
    for row in row_panels:
        image_utils.paste_image(row, figure_canvas, 0, row_y)
        row_y = row_y + row.size[1]

    return figure_canvas
def get_split_view(conn, pixel_ids, z_start, z_end, split_indexes,
                   channel_names, colour_channels, merged_indexes,
                   merged_colours, width=None, height=None, spacer=12,
                   algorithm=None, stepping=1, scalebar=None,
                   overlay_colour=(255, 255, 255)):
    """
    This method makes a figure of a number of images, arranged in rows with
    each row being the split-view of a single image. The channels are
    arranged left to right, with the combined image added on the right.
    The combined image is rendered according to current settings on the
    server, but its channels will be turned on/off according to
    @merged_indexes. No text labels are added to the image at this stage.
    The figure is returned as a PIL 'Image'

    @ conn            session for server access
    @ pixel_ids       a list of the Ids for the pixels we want to display
    @ z_start         the start of Z-range for projection
    @ z_end           the end of Z-range for projection
    @ split_indexes   a list of the channel indexes to display. Same
                      channels for each image/row
    @ channel_names   the Map of index:names to go above the columns for
                      each split channel
    @ colour_channels the colour to make each column/channel
    @ merged_indexes  list or set of channels in the merged image
    @ merged_colours  index: colour dictionary of channels in the merged
                      image (NOTE: mutated - missing colours are filled in
                      from the server's current rendering settings)
    @ width           the size in pixels to show each panel
    @ height          the size in pixels to show each panel
    @ spacer          the gap between images and around the figure.
                      Doubled between rows.
    @ algorithm       projection type; defaults to maximum intensity
    @ stepping        projection z-step
    @ scalebar        length of scale bar in microns, or None to omit
    @ overlay_colour  colour of the scale bar
    @raise Exception  if a rendering def cannot be looked up for an image
    """
    if algorithm is None:  # omero::constants::projection::ProjectionType
        algorithm = ProjectionType.MAXIMUMINTENSITY
    timepoint = 0
    mode = "RGB"
    white = (255, 255, 255)

    # create a rendering engine
    re = conn.createRenderingEngine()
    query_service = conn.getQueryService()

    row_panels = []
    total_height = 0
    total_width = 0
    max_image_width = 0
    physical_size_x = 0

    log("Split View Rendering Log...")

    if z_start > -1 and z_end > -1:
        al_string = str(algorithm).replace("INTENSITY",
                                           " Intensity").capitalize()
        log("All images projected using '%s' projection with step size: "
            "%d start: %d end: %d"
            % (al_string, stepping, z_start + 1, z_end + 1))
    else:
        log("Images show last-viewed Z-section")

    for row, pixels_id in enumerate(pixel_ids):
        log("Rendering row %d" % (row + 1))

        pixels = query_service.get("Pixels", pixels_id)
        size_x = pixels.getSizeX().getValue()
        size_y = pixels.getSizeY().getValue()
        size_z = pixels.getSizeZ().getValue()
        size_c = pixels.getSizeC().getValue()

        if pixels.getPhysicalSizeX():
            physical_x = pixels.getPhysicalSizeX().getValue()
        else:
            physical_x = 0
        if pixels.getPhysicalSizeY():
            physical_y = pixels.getPhysicalSizeY().getValue()
        else:
            physical_y = 0
        log(" Pixel size (um): x: %.3f y: %.3f"
            % (physical_x, physical_y))
        if row == 0:    # set values for primary image
            physical_size_x = physical_x
            physical_size_y = physical_y
        else:           # compare primary image with current one
            if physical_size_x != physical_x or \
                    physical_size_y != physical_y:
                log(" WARNING: Images have different pixel lengths."
                    " Scales are not comparable.")

        log(" Image dimensions (pixels): x: %d y: %d" % (size_x, size_y))
        max_image_width = max(max_image_width, size_x)

        # set up rendering engine with the pixels
        re.lookupPixels(pixels_id)
        if not re.lookupRenderingDef(pixels_id):
            re.resetDefaults()
        if not re.lookupRenderingDef(pixels_id):
            # FIX: was `raise "..."` - string exceptions are invalid
            raise Exception("Failed to lookup Rendering Def")
        re.load()

        pro_start = z_start
        pro_end = z_end
        # make sure we're within Z range for projection.
        if pro_end >= size_z:
            pro_end = size_z - 1
            if pro_start > size_z:
                pro_start = 0
            log(" WARNING: Current image has fewer Z-sections than the"
                " primary image.")

        # if we have an invalid z-range (start or end less than 0), show
        # default Z only
        if pro_start < 0 or pro_end < 0:
            pro_start = re.getDefaultZ()
            pro_end = pro_start
            log(" Display Z-section: %d" % (pro_end + 1))
        else:
            log(" Projecting z range: %d - %d (max Z is %d)"
                % (pro_start + 1, pro_end + 1, size_z))

        # FIX: initialise the mismatch flag BEFORE the merged-channel loop.
        # Previously it was (re)set to False after the loop, clobbering any
        # mismatch found among merged_indexes so the warning never fired.
        channel_mismatch = False

        # turn on channels in merged_indexes
        for i in range(size_c):
            re.setActive(i, False)  # Turn all off first
        log("Turning on merged_indexes: %s ..." % merged_indexes)
        for i in merged_indexes:
            if i >= size_c:
                channel_mismatch = True
            else:
                re.setActive(i, True)
                if i in merged_colours:
                    re.setRGBA(i, *merged_colours[i])

        # get the combined image, using the existing rendering settings
        channels_string = ", ".join([channel_names[i]
                                     for i in merged_indexes])
        log(" Rendering merged channels: %s" % channels_string)
        if pro_start != pro_end:
            overlay = re.renderProjectedCompressed(algorithm, timepoint,
                                                   stepping, pro_start,
                                                   pro_end)
        else:
            plane_def = omero.romio.PlaneDef()
            plane_def.z = pro_start
            plane_def.t = timepoint
            overlay = re.renderCompressed(plane_def)

        # now get each channel in greyscale (or colour)
        # a list of renderedImages (data as Strings) for the split-view row
        rendered_images = []
        # first, turn off all channels in pixels
        for i in range(size_c):
            re.setActive(i, False)

        # for each channel in the splitview...
        for index in split_indexes:
            if index >= size_c:
                # can't turn channel on - simply render black square!
                channel_mismatch = True
                rendered_images.append(None)
            else:
                re.setActive(index, True)  # turn channel on
                if colour_channels:
                    # if split channels are coloured...
                    if index in merged_indexes:
                        # and this channel is in the combined image
                        if index in merged_colours:
                            rgba = tuple(merged_colours[index])
                            re.setRGBA(index, *rgba)  # set coloured
                        else:
                            merged_colours[index] = re.getRGBA(index)
                    else:
                        # otherwise set white (max alpha)
                        re.setRGBA(index, 255, 255, 255, 255)
                else:
                    # if not colour_channels - channels are white
                    re.setRGBA(index, 255, 255, 255, 255)
                info = (index, re.getChannelWindowStart(index),
                        re.getChannelWindowEnd(index))
                log(" Render channel: %s start: %d end: %d" % info)
                if pro_start != pro_end:
                    rendered_img = re.renderProjectedCompressed(
                        algorithm, timepoint, stepping, pro_start, pro_end)
                else:
                    plane_def = omero.romio.PlaneDef()
                    plane_def.z = pro_start
                    plane_def.t = timepoint
                    rendered_img = re.renderCompressed(plane_def)
                rendered_images.append(rendered_img)
            if index < size_c:
                re.setActive(index, False)  # turn the channel off again!

        if channel_mismatch:
            log(" WARNING channel mismatch: The current image has fewer"
                " channels than the primary image.")

        # make a canvas for the row of splitview images...
        # extra image for combined image
        image_count = len(rendered_images) + 1
        canvas_width = ((width + spacer) * image_count) + spacer
        canvas_height = spacer + height
        size = (canvas_width, canvas_height)
        # create a canvas of appropriate width, height
        canvas = Image.new(mode, size, white)

        px = spacer
        py = spacer / 2
        col = 0
        # paste the images in
        for img in rendered_images:
            if img is None:
                # missing channel - black placeholder panel
                im = Image.new(mode, (size_x, size_y), (0, 0, 0))
            else:
                im = Image.open(io.BytesIO(img))
            i = image_utils.resize_image(im, width, height)
            image_utils.paste_image(i, canvas, px, py)
            px = px + width + spacer
            col = col + 1

        # add combined image, after resizing and adding scale bar
        i = Image.open(io.BytesIO(overlay))
        scaled_image = image_utils.resize_image(i, width, height)
        if scalebar:
            x_indent = spacer
            y_indent = x_indent
            # if we've scaled to half size, zoom = 2
            zoom = image_utils.get_zoom_factor(i.size, width, height)
            # and the scale bar will be half size
            sbar = float(scalebar) / zoom
            status, log_msg = figUtil.addScalebar(sbar, x_indent, y_indent,
                                                  scaled_image, pixels,
                                                  overlay_colour)
            log(log_msg)

        image_utils.paste_image(scaled_image, canvas, px, py)

        # most should be same width anyway
        total_width = max(total_width, canvas_width)
        # add together the heights of each row
        total_height = total_height + canvas_height

        row_panels.append(canvas)

    # make a figure to combine all split-view rows
    # each row has 1/2 spacer above and below the panels. Need extra 1/2
    # spacer top and bottom
    figure_size = (total_width, total_height + spacer)
    figure_canvas = Image.new(mode, figure_size, white)

    row_y = spacer / 2
    for row in row_panels:
        image_utils.paste_image(row, figure_canvas, 0, row_y)
        row_y = row_y + row.size[1]

    return figure_canvas
def getSplitView(conn, pixelIds, zStart, zEnd, splitIndexes, channelNames,
                 colourChannels, mergedIndexes, mergedColours, width=None,
                 height=None, spacer=12, algorithm=None, stepping=1,
                 scalebar=None, overlayColour=(255, 255, 255)):
    """
    This method makes a figure of a number of images, arranged in rows with
    each row being the split-view of a single image. The channels are
    arranged left to right, with the combined image added on the right.
    The combined image is rendered according to current settings on the
    server, but its channels will be turned on/off according to
    @mergedIndexes. No text labels are added to the image at this stage.
    The figure is returned as a PIL 'Image'

    @ conn            session for server access
    @ pixelIds        a list of the Ids for the pixels we want to display
    @ zStart          the start of Z-range for projection
    @ zEnd            the end of Z-range for projection
    @ splitIndexes    a list of the channel indexes to display. Same
                      channels for each image/row
    @ channelNames    the Map of index:names to go above the columns for
                      each split channel
    @ colourChannels  the colour to make each column/channel
    @ mergedIndexes   list or set of channels in the merged image
    @ mergedColours   index: colour dictionary of channels in the merged
                      image (NOTE: mutated - missing colours are filled in
                      from the server's current rendering settings)
    @ width           the size in pixels to show each panel
    @ height          the size in pixels to show each panel
    @ spacer          the gap between images and around the figure.
                      Doubled between rows.
    @ algorithm       projection type; defaults to maximum intensity
    @ stepping        projection z-step
    @ scalebar        length of scale bar in microns, or None to omit
    @ overlayColour   colour of the scale bar
    @raise Exception  if a rendering def cannot be looked up for an image
    """
    if algorithm is None:  # omero::constants::projection::ProjectionType
        algorithm = omero.constants.projection.ProjectionType.MAXIMUMINTENSITY
    timepoint = 0
    mode = "RGB"
    white = (255, 255, 255)

    # create a rendering engine
    re = conn.createRenderingEngine()
    queryService = conn.getQueryService()

    rowPanels = []
    totalHeight = 0
    totalWidth = 0
    maxImageWidth = 0
    physicalSizeX = 0

    log("Split View Rendering Log...")

    if zStart > -1 and zEnd > -1:
        alString = str(algorithm).replace("INTENSITY",
                                          " Intensity").capitalize()
        log("All images projected using '%s' projection with step size:"
            " %d start: %d end: %d"
            % (alString, stepping, zStart+1, zEnd+1))
    else:
        log("Images show last-viewed Z-section")

    for row, pixelsId in enumerate(pixelIds):
        log("Rendering row %d" % (row+1))

        pixels = queryService.get("Pixels", pixelsId)
        sizeX = pixels.getSizeX().getValue()
        sizeY = pixels.getSizeY().getValue()
        sizeZ = pixels.getSizeZ().getValue()
        sizeC = pixels.getSizeC().getValue()

        if pixels.getPhysicalSizeX():
            physicalX = pixels.getPhysicalSizeX().getValue()
        else:
            physicalX = 0
        if pixels.getPhysicalSizeY():
            physicalY = pixels.getPhysicalSizeY().getValue()
        else:
            physicalY = 0
        log(" Pixel size (um): x: %.3f y: %.3f" % (physicalX, physicalY))
        if row == 0:    # set values for primary image
            physicalSizeX = physicalX
            physicalSizeY = physicalY
        else:           # compare primary image with current one
            if physicalSizeX != physicalX or physicalSizeY != physicalY:
                log(" WARNING: Images have different pixel lengths."
                    " Scales are not comparable.")

        log(" Image dimensions (pixels): x: %d y: %d" % (sizeX, sizeY))
        maxImageWidth = max(maxImageWidth, sizeX)

        # set up rendering engine with the pixels
        re.lookupPixels(pixelsId)
        if not re.lookupRenderingDef(pixelsId):
            re.resetDefaults()
        if not re.lookupRenderingDef(pixelsId):
            # FIX: was `raise "..."` - string exceptions are invalid
            raise Exception("Failed to lookup Rendering Def")
        re.load()

        proStart = zStart
        proEnd = zEnd
        # make sure we're within Z range for projection.
        if proEnd >= sizeZ:
            proEnd = sizeZ - 1
            if proStart > sizeZ:
                proStart = 0
            log(" WARNING: Current image has fewer Z-sections than the"
                " primary image.")

        # if we have an invalid z-range (start or end less than 0), show
        # default Z only
        if proStart < 0 or proEnd < 0:
            proStart = re.getDefaultZ()
            proEnd = proStart
            log(" Display Z-section: %d" % (proEnd+1))
        else:
            log(" Projecting z range: %d - %d (max Z is %d)"
                % (proStart+1, proEnd+1, sizeZ))

        # FIX: initialise the mismatch flag BEFORE the merged-channel loop.
        # Previously it was (re)set to False after the loop, clobbering any
        # mismatch found among mergedIndexes so the warning never fired.
        channelMismatch = False

        # turn on channels in mergedIndexes.
        for i in mergedIndexes:
            if i >= sizeC:
                channelMismatch = True
            else:
                re.setActive(i, True)
                if i in mergedColours:
                    re.setRGBA(i, *mergedColours[i])

        # get the combined image, using the existing rendering settings
        channelsString = ", ".join([channelNames[i] for i in mergedIndexes])
        log(" Rendering merged channels: %s" % channelsString)
        if proStart != proEnd:
            overlay = re.renderProjectedCompressed(algorithm, timepoint,
                                                   stepping, proStart,
                                                   proEnd)
        else:
            planeDef = omero.romio.PlaneDef()
            planeDef.z = proStart
            planeDef.t = timepoint
            overlay = re.renderCompressed(planeDef)

        # now get each channel in greyscale (or colour)
        # a list of renderedImages (data as Strings) for the split-view row
        renderedImages = []
        # first, turn off all channels in pixels
        for i in range(sizeC):
            re.setActive(i, False)

        # for each channel in the splitview...
        for index in splitIndexes:
            if index >= sizeC:
                # can't turn channel on - simply render black square!
                channelMismatch = True
                renderedImages.append(None)
            else:
                re.setActive(index, True)  # turn channel on
                if colourChannels:
                    # if split channels are coloured...
                    if index in mergedIndexes:
                        # and this channel is in the combined image
                        if index in mergedColours:
                            rgba = tuple(mergedColours[index])
                            # FIX: removed Python-2-only debug
                            # `print "Setting channel to color", ...`
                            # statement (syntax error under Python 3)
                            re.setRGBA(index, *rgba)  # set coloured
                        else:
                            mergedColours[index] = re.getRGBA(index)
                    else:
                        # otherwise set white (max alpha)
                        re.setRGBA(index, 255, 255, 255, 255)
                else:
                    # if not colourChannels - channels are white
                    re.setRGBA(index, 255, 255, 255, 255)
                info = (index, re.getChannelWindowStart(index),
                        re.getChannelWindowEnd(index))
                log(" Render channel: %s start: %d end: %d" % info)
                if proStart != proEnd:
                    renderedImg = re.renderProjectedCompressed(
                        algorithm, timepoint, stepping, proStart, proEnd)
                else:
                    planeDef = omero.romio.PlaneDef()
                    planeDef.z = proStart
                    planeDef.t = timepoint
                    renderedImg = re.renderCompressed(planeDef)
                renderedImages.append(renderedImg)
            if index < sizeC:
                re.setActive(index, False)  # turn the channel off again!

        if channelMismatch:
            log(" WARNING channel mismatch: The current image has fewer"
                " channels than the primary image.")

        # make a canvas for the row of splitview images...
        imageCount = len(renderedImages) + 1  # extra image for combined image
        canvasWidth = ((width + spacer) * imageCount) + spacer
        canvasHeight = spacer + height
        size = (canvasWidth, canvasHeight)
        # create a canvas of appropriate width, height
        canvas = Image.new(mode, size, white)

        px = spacer
        py = spacer/2
        col = 0
        # paste the images in
        for img in renderedImages:
            if img is None:
                # missing channel - black placeholder panel
                im = Image.new(mode, (sizeX, sizeY), (0, 0, 0))
            else:
                im = Image.open(StringIO.StringIO(img))
            i = imgUtil.resizeImage(im, width, height)
            imgUtil.pasteImage(i, canvas, px, py)
            px = px + width + spacer
            col = col + 1

        # add combined image, after resizing and adding scale bar
        i = Image.open(StringIO.StringIO(overlay))
        scaledImage = imgUtil.resizeImage(i, width, height)
        if scalebar:
            xIndent = spacer
            yIndent = xIndent
            # if we've scaled to half size, zoom = 2
            zoom = imgUtil.getZoomFactor(i.size, width, height)
            # and the scale bar will be half size
            sbar = float(scalebar) / zoom
            status, logMsg = figUtil.addScalebar(sbar, xIndent, yIndent,
                                                 scaledImage, pixels,
                                                 overlayColour)
            log(logMsg)

        imgUtil.pasteImage(scaledImage, canvas, px, py)

        # most should be same width anyway
        totalWidth = max(totalWidth, canvasWidth)
        # add together the heights of each row
        totalHeight = totalHeight + canvasHeight

        rowPanels.append(canvas)

    # make a figure to combine all split-view rows
    # each row has 1/2 spacer above and below the panels. Need extra 1/2
    # spacer top and bottom
    figureSize = (totalWidth, totalHeight+spacer)
    figureCanvas = Image.new(mode, figureSize, white)

    rowY = spacer/2
    for row in rowPanels:
        imgUtil.pasteImage(row, figureCanvas, 0, rowY)
        rowY = rowY + row.size[1]

    return figureCanvas
def getImageFrames(conn, pixelIds, tIndexes, zStart, zEnd, width, height,
                   spacer, algorithm, stepping, scalebar, overlayColour,
                   timeUnits):
    """
    Makes a canvas showing an image per row with multiple columns showing
    frames from each image/movie. Labels above each frame show the
    time-stamp of that frame in the specified units.

    @param conn             The OMERO connection (rendering engine + query service)
    @param pixelIds         A list of the Pixel IDs for the images in the figure
    @param tIndexes         A list of tIndexes to display frames from
    @param zStart           Projection Z-start
    @param zEnd             Projection Z-end
    @param width            Maximum width of panels
    @param height           Max height of panels
    @param spacer           Space between panels
    @param algorithm        Projection algorithm e.g. "MAXIMUMINTENSITY"
    @param stepping         Projection z-step
    @param scalebar         A number of microns for scale-bar
    @param overlayColour    Colour of the scale-bar as tuple (255,255,255)
    @param timeUnits        A string such as "SECS"
    @return                 The assembled figure as a PIL 'Image'
    """
    mode = "RGB"
    white = (255, 255, 255)

    # create a rendering engine
    re = conn.createRenderingEngine()
    queryService = conn.getQueryService()

    rowPanels = []
    totalHeight = 0
    totalWidth = 0
    maxImageWidth = 0
    physicalSizeX = 0

    for row, pixelsId in enumerate(pixelIds):
        log("Rendering row %d" % (row))

        pixels = queryService.get("Pixels", pixelsId)
        sizeX = pixels.getSizeX().getValue()
        sizeY = pixels.getSizeY().getValue()
        sizeZ = pixels.getSizeZ().getValue()
        sizeC = pixels.getSizeC().getValue()
        sizeT = pixels.getSizeT().getValue()

        if pixels.getPhysicalSizeX():
            physicalX = pixels.getPhysicalSizeX().getValue()
        else:
            physicalX = 0
        if pixels.getPhysicalSizeY():
            physicalY = pixels.getPhysicalSizeY().getValue()
        else:
            physicalY = 0
        log(" Pixel size (um): x: %s y: %s" % (str(physicalX), str(physicalY)))

        if row == 0:    # set values for primary image
            physicalSizeX = physicalX
            physicalSizeY = physicalY
        else:           # compare primary image with current one
            if physicalSizeX != physicalX or physicalSizeY != physicalY:
                log(" WARNING: Images have different pixel lengths."
                    " Scales are not comparable.")

        log(" Image dimensions (pixels): x: %d y: %d" % (sizeX, sizeY))
        maxImageWidth = max(maxImageWidth, sizeX)

        # set up rendering engine with the pixels
        re.lookupPixels(pixelsId)
        if not re.lookupRenderingDef(pixelsId):
            re.resetDefaults()
            if not re.lookupRenderingDef(pixelsId):
                # NOTE: was 'raise "Failed..."' - string exceptions were
                # removed in Python 2.6+ and would raise TypeError instead.
                raise Exception("Failed to lookup Rendering Def")
        re.load()

        proStart = zStart
        proEnd = zEnd
        # make sure we're within Z range for projection.
        if proEnd >= sizeZ:
            proEnd = sizeZ - 1
            # was 'proStart > sizeZ', which let proStart == sizeZ slip
            # through as an out-of-range plane index
            if proStart >= sizeZ:
                proStart = 0
            log(" WARNING: Current image has fewer Z-sections than the primary image.")

        # if we have an invalid z-range (start or end less than 0),
        # show default Z only
        if proStart < 0 or proEnd < 0:
            proStart = re.getDefaultZ()
            proEnd = proStart
            log(" Display Z-section: %d" % (proEnd + 1))
        else:
            log(" Projecting z range: %d - %d (max Z is %d)"
                % (proStart + 1, proEnd + 1, sizeZ))

        # render each requested time point (as compressed data Strings),
        # resized to panel size
        renderedImages = []
        for time in tIndexes:
            if time >= sizeT:
                log(" WARNING: This image does not have Time frame: %d."
                    " (max is %d)" % (time + 1, sizeT))
            else:
                if proStart != proEnd:
                    renderedImg = re.renderProjectedCompressed(
                        algorithm, time, stepping, proStart, proEnd)
                else:
                    planeDef = omero.romio.PlaneDef()
                    planeDef.z = proStart
                    planeDef.t = time
                    renderedImg = re.renderCompressed(planeDef)
                # create images and resize, add to list
                image = Image.open(StringIO.StringIO(renderedImg))
                resizedImage = imgUtil.resizeImage(image, width, height)
                renderedImages.append(resizedImage)

        # make a canvas for the row of images
        # (time labels will be added above each row)
        font = imgUtil.getFont(width / 12)
        fontHeight = font.getsize("Textq")[1]
        canvasWidth = ((width + spacer) * len(renderedImages)) + spacer
        canvasHeight = spacer / 2 + fontHeight + spacer + height
        size = (canvasWidth, canvasHeight)
        # create a canvas of appropriate width, height
        canvas = Image.new(mode, size, white)

        # add text labels above each frame
        textX = spacer
        textY = spacer / 4
        timeLabels = figUtil.getTimeLabels(
            queryService, pixelsId, tIndexes, sizeT, timeUnits)
        for t, tIndex in enumerate(tIndexes):
            if tIndex >= sizeT:
                continue
            timeLabel = timeLabels[t]
            textW = font.getsize(timeLabel)[0]
            inset = (width - textW) / 2
            textdraw = ImageDraw.Draw(canvas)
            textdraw.text((textX + inset, textY), timeLabel, font=font,
                          fill=(0, 0, 0))
            textX += width + spacer

        # add scale bar to last frame (guard: all requested frames may have
        # been out of the image's T-range, leaving renderedImages empty)
        if scalebar and len(renderedImages) > 0:
            scaledImage = renderedImages[-1]
            xIndent = spacer
            yIndent = xIndent
            # if we've scaled to half size, zoom = 2
            zoom = imgUtil.getZoomFactor(scaledImage.size, width, height)
            # and the scale bar will be half size
            sbar = float(scalebar) / zoom
            status, logMsg = figUtil.addScalebar(
                sbar, xIndent, yIndent, scaledImage, pixels, overlayColour)
            log(logMsg)

        # paste the images in
        px = spacer
        py = spacer + fontHeight
        for img in renderedImages:
            imgUtil.pasteImage(img, canvas, px, py)
            px = px + width + spacer

        totalWidth = max(totalWidth, canvasWidth)   # most should be same width anyway
        totalHeight = totalHeight + canvasHeight    # add together the heights of each row
        rowPanels.append(canvas)

    # make a figure to combine all rows
    # each row has 1/2 spacer above and below the panels.
    # Need extra 1/2 spacer top and bottom
    figureSize = (totalWidth, totalHeight + spacer)
    figureCanvas = Image.new(mode, figureSize, white)
    rowY = spacer / 2
    for row in rowPanels:
        imgUtil.pasteImage(row, figureCanvas, 0, rowY)
        rowY = rowY + row.size[1]

    return figureCanvas
def get_split_view(conn, image_ids, pixel_ids, merged_indexes,
                   merged_colours, width, height, image_labels, spacer,
                   algorithm, stepping, scalebar, overlay_colour, roi_zoom,
                   max_columns, show_roi_duration, roi_label):
    """
    Makes a figure with one row per image: the full merged image (with the
    rectangle ROI outlined) on the left, and the zoomed ROI-movie panels on
    the right. The merged image is rendered according to current settings
    on the server, but its channels are turned on/off according to
    merged_indexes.

    NOTE: rows whose image has no matching rectangle ROI are skipped, and
    the corresponding entry is removed (in place) from image_labels.

    @param conn             server connection (ROI service, rendering
                            engine, query service)
    @param image_ids        a list of image IDs, one per row
    @param pixel_ids        a list of the Ids for the pixels we want to display
    @param merged_indexes   list or set of channels in the merged image
    @param merged_colours   index: colour dictionary of channels in the
                            merged image
    @param width            the size in pixels to show each panel
    @param height           the size in pixels to show each panel
    @param image_labels     a list (one entry per image) of label lists
    @param spacer           the gap between images and around the figure.
                            Doubled between rows.
    @param roi_label        label/ID used to pick the rectangle ROI
    @return                 the figure as a PIL 'Image', or None if no
                            image has a rectangle ROI
    """
    roi_service = conn.getRoiService()
    re = conn.createRenderingEngine()
    query_service = conn.getQueryService()    # only needed for movie

    # establish dimensions and roi_zoom from the first image that has a
    # rectangle ROI. Guard against an empty image_ids list, which would
    # otherwise leave 'rect' unbound.
    rect = None
    for iid in image_ids:
        rect = get_rectangle(roi_service, iid, roi_label)
        if rect is not None:
            break

    if rect is None:
        log("Found no images with rectangle ROIs")
        return
    x, y, roi_width, roi_height, time_shape_map = rect

    roi_outline = ((max(width, height)) // 200) + 1

    if roi_zoom is None:
        # get the pixels for primary image.
        pixels = query_service.get("Pixels", pixel_ids[0])
        size_y = pixels.getSizeY().getValue()
        roi_zoom = float(height) / float(roi_height)
        log("ROI zoom set by primary image is %F X" % roi_zoom)
    else:
        log("ROI zoom: %F X" % roi_zoom)

    # pick a font size roughly proportional to panel width
    text_gap = spacer // 3
    font_size = 12
    if width > 500:
        font_size = 48
    elif width > 400:
        font_size = 36
    elif width > 300:
        font_size = 24
    elif width > 200:
        font_size = 16
    font = image_utils.get_font(font_size)
    text_height = font.getsize("Textq")[1]

    # left margin wide enough for the longest vertical label column
    max_count = 0
    for row in image_labels:
        max_count = max(max_count, len(row))
    left_text_width = (text_height + text_gap) * max_count + spacer

    max_split_panel_width = 0
    total_canvas_height = 0
    merged_images = []
    roi_split_panes = []
    top_spacers = []    # space for labels above each row

    removed = 0    # number of label rows deleted so far (keeps indexes aligned)
    for row, pixels_id in enumerate(pixel_ids):
        log("Rendering row %d" % (row))

        # need to get the roi dimensions from the server
        image_id = image_ids[row]
        roi = get_rectangle(roi_service, image_id, roi_label)
        if roi is None:
            log("No Rectangle ROI found for this image")
            # remove the corresponding labels. The index must be offset by
            # the number of rows already removed - 'del image_labels[row]'
            # would delete the wrong row after the first removal.
            del image_labels[row - removed]
            removed += 1
            continue

        roi_x, roi_y, roi_width, roi_height, time_shape_map = roi

        pixels = query_service.get("Pixels", pixels_id)
        size_x = pixels.getSizeX().getValue()
        size_y = pixels.getSizeY().getValue()

        # work out if any additional zoom is needed (if the full-sized
        # image is different size from primary image)
        full_size = (size_x, size_y)
        image_zoom = image_utils.get_zoom_factor(full_size, width, height)
        if image_zoom != 1.0:
            log(" Scaling down the full-size image by a factor of %F"
                % image_zoom)

        log(" ROI location (top-left of first frame) x: %d y: %d and size"
            " width: %d height: %d" % (roi_x, roi_y, roi_width, roi_height))

        # get the split pane and full merged image
        roi_split_pane, full_merged_image, top_spacer = get_roi_movie_view(
            re, query_service, pixels, time_shape_map, merged_indexes,
            merged_colours, roi_width, roi_height, roi_zoom, spacer,
            algorithm, stepping, font_size, max_columns, show_roi_duration)

        # and now zoom the full-sized merged image, add scalebar
        merged_image = image_utils.resize_image(full_merged_image, width,
                                                height)
        if scalebar:
            x_indent = spacer
            y_indent = x_indent
            # and the scale bar will be half size
            sbar = float(scalebar) / image_zoom
            status, log_msg = figUtil.addScalebar(
                sbar, x_indent, y_indent, merged_image, pixels,
                overlay_colour)
            log(log_msg)

        # draw ROI onto mergedImage...
        # recalculate roi if the image has been zoomed
        x = roi_x // image_zoom
        y = roi_y // image_zoom
        roi_x2 = (roi_x + roi_width) // image_zoom
        roi_y2 = (roi_y + roi_height) // image_zoom
        draw_rectangle(merged_image, x, y, roi_x2, roi_y2, overlay_colour,
                       roi_outline)

        # note the maxWidth of zoomed panels and total height for row
        max_split_panel_width = max(max_split_panel_width,
                                    roi_split_pane.size[0])
        total_canvas_height += spacer + max(height + top_spacer,
                                            roi_split_pane.size[1])

        merged_images.append(merged_image)
        roi_split_panes.append(roi_split_pane)
        top_spacers.append(top_spacer)

    # make a figure to combine all split-view rows
    # each row has 1/2 spacer above and below the panels. Need extra 1/2
    # spacer top and bottom
    canvas_width = left_text_width + width + 2 * spacer + max_split_panel_width
    figure_size = (canvas_width, total_canvas_height + spacer)
    figure_canvas = Image.new("RGB", figure_size, (255, 255, 255))

    row_y = spacer
    for row, image in enumerate(merged_images):
        label_canvas = figUtil.getVerticalLabels(image_labels[row], font,
                                                 text_gap)
        # centre the label column vertically against the merged image
        v_offset = (image.size[1] - label_canvas.size[1]) // 2
        image_utils.paste_image(label_canvas, figure_canvas, spacer // 2,
                                row_y + top_spacers[row] + v_offset)
        image_utils.paste_image(image, figure_canvas, left_text_width,
                                row_y + top_spacers[row])
        x = left_text_width + width + spacer
        image_utils.paste_image(roi_split_panes[row], figure_canvas, x,
                                row_y)
        row_y = row_y + max(image.size[1] + top_spacers[row],
                            roi_split_panes[row].size[1]) + spacer

    return figure_canvas