Example #1
def makeSplitViewFigure(conn,
                        pixelIds,
                        zStart,
                        zEnd,
                        splitIndexes,
                        channelNames,
                        colourChannels,
                        mergedIndexes,
                        mergedColours,
                        mergedNames,
                        width,
                        height,
                        imageLabels=None,
                        algorithm=None,
                        stepping=1,
                        scalebar=None,
                        overlayColour=(255, 255, 255)):
    """ This method makes a figure of a number of images, arranged in rows with each row being the split-view
    of a single image. The channels are arranged left to right, with the combined image added on the right.
    The combined image is rendered according to current settings on the server, but it's channels will be
    turned on/off according to @mergedIndexes. 
    The colour of each channel turned white if colourChannels is false or the channel is not in the merged image.
    Otherwise channel is changed to mergedColours[i]
    Text is added at the top of the figure, to display channel names above each column, and the 
    combined image may have it's various channels named in coloured text. The optional imageLabels is a list 
    of string lists for naming the images at the left of the figure (Each image may have 0 or multiple labels).
    
    The figure is returned as a PIL 'Image' 
    
    @ session    session for server access
    @ pixelIds        a list of the Ids for the pixels we want to display
    @ zStart        the start of Z-range for projection
    @ zEnd             the end of Z-range for projection
    @ splitIndexes     a list of the channel indexes to display. Same channels for each image/row
    @ channelNames         map of index:name to go above the columns for each split channel
    @ colourChannels     true if split channels are 
    @ mergedIndexes        list (or set) of channels in the merged image 
    @ mergedColours     index: colour map of channels in the merged image
    @ mergedNames        if true, label with merged panel with channel names (otherwise, label "Merged")
    @ width             the width of primary image (all images zoomed to this height)
    @ height            the height of primary image
    @ imageLabels         optional list of string lists.
    @ algorithm            for projection MAXIMUMINTENSITY or MEANINTENSITY
    @ stepping            projection increment 
    """

    fontsize = 12
    if width > 500:
        fontsize = 48
    elif width > 400:
        fontsize = 36
    elif width > 300:
        fontsize = 24
    elif width > 200:
        fontsize = 16

    spacer = (width / 25) + 2
    textGap = 3  # gap between text and image panels
    leftTextWidth = 0
    textHeight = 0

    # get the rendered splitview, with images surrounded on all sides by spacer
    sv = getSplitView(conn, pixelIds, zStart, zEnd, splitIndexes, channelNames,
                      colourChannels, mergedIndexes, mergedColours, width,
                      height, spacer, algorithm, stepping, scalebar,
                      overlayColour)

    font = imgUtil.getFont(fontsize)
    mode = "RGB"
    white = (255, 255, 255)
    textHeight = font.getsize("Textq")[1]
    topSpacer = spacer + textHeight + textGap
    #textCanvas = Image.new(mode, (1,1), white)
    #textdraw = ImageDraw.Draw(textCanvas)
    #h = textdraw.textsize("Textq", font=font) [1]

    # if adding text to the left, write the text on horizontal canvas, then rotate to vertical (below)
    if imageLabels:
        # find max number of labels
        maxCount = 0
        rowHeights = []
        for row in imageLabels:
            maxCount = max(maxCount, len(row))
        leftTextWidth = (textHeight + textGap) * maxCount
        # make the canvas as wide as the panels' height
        size = (sv.size[1], leftTextWidth)
        textCanvas = Image.new(mode, size, white)
        textdraw = ImageDraw.Draw(textCanvas)
        px = spacer
        imageLabels.reverse()
        for row in imageLabels:
            py = leftTextWidth - textGap  # start at bottom
            for l, label in enumerate(row):
                py = py - textHeight  # find the top of this row
                w = textdraw.textsize(label, font=font)[0]
                inset = int((height - w) / 2)
                textdraw.text((px + inset, py),
                              label,
                              font=font,
                              fill=(0, 0, 0))
                py = py - textGap  # add space between rows
            px = px + spacer + height  # spacer between each row

    topTextHeight = textHeight + textGap
    if (mergedNames):
        topTextHeight = ((textHeight) * len(mergedIndexes))
    # make a canvas big-enough to add text to the images.
    canvasWidth = leftTextWidth + sv.size[0]
    canvasHeight = topTextHeight + sv.size[1]
    size = (canvasWidth, canvasHeight)
    canvas = Image.new(mode, size,
                       white)  # create a canvas of appropriate width, height

    # add the split-view panel
    pasteX = leftTextWidth
    pasteY = topTextHeight
    imgUtil.pasteImage(sv, canvas, pasteX, pasteY)

    draw = ImageDraw.Draw(canvas)

    # add text to rows
    # want it to be vertical. Rotate and paste the text canvas from above
    if imageLabels:
        textV = textCanvas.rotate(90)
        imgUtil.pasteImage(textV, canvas, spacer, topTextHeight)

    # add text to columns
    px = spacer + leftTextWidth
    # top edge of the panels, minus one row of text
    py = topTextHeight + spacer - (textHeight + textGap)
    for index in splitIndexes:
        # calculate the position of the text, centered above the image
        w = font.getsize(channelNames[index])[0]
        inset = int((width - w) / 2)
        # text is coloured if channel is grey AND in the merged image
        rgba = (0, 0, 0, 255)
        if index in mergedIndexes:
            if (not colourChannels) and (index in mergedColours):
                rgba = tuple(mergedColours[index])
                if rgba == (255, 255, 255, 255):
                    # white text is unreadable on a white background, use black
                    rgba = (0, 0, 0, 255)
        draw.text((px + inset, py), channelNames[index], font=font, fill=rgba)
        px = px + width + spacer

    # add text for combined image
    if (mergedNames):
        mergedIndexes.reverse()
        print "Adding merged channel names..."
        for index in mergedIndexes:
            rgba = (0, 0, 0, 255)
            if index in mergedColours:
                rgba = tuple(mergedColours[index])
                print index, channelNames[index], rgba
                if rgba == (255, 255, 255, 255):
                    # white text is unreadable on a white background, use black
                    rgba = (0, 0, 0, 255)
            name = channelNames[index]
            combTextWidth = font.getsize(name)[0]
            inset = int((width - combTextWidth) / 2)
            draw.text((px + inset, py), name, font=font, fill=rgba)
            py = py - textHeight
    else:
        combTextWidth = font.getsize("Merged")[0]
        inset = int((width - combTextWidth) / 2)
        px = px + inset
        draw.text((px, py), "Merged", font=font, fill=(0, 0, 0))

    return canvas
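
A minimal usage sketch, not part of the original example: it assumes an already-connected BlitzGateway object named conn, and the Pixels IDs, channel maps and output filename below are placeholders.

# Hypothetical values for illustration only
pixelIds = [101, 102]                                # Pixels IDs of the images to show, one per row
splitIndexes = [0, 1]                                # channel indexes shown as separate columns
channelNames = {0: "DAPI", 1: "GFP"}                 # labels above the split columns
mergedIndexes = [0, 1]                               # channels combined in the merged panel
mergedColours = {0: (0, 0, 255, 255), 1: (0, 255, 0, 255)}   # index: RGBA

fig = makeSplitViewFigure(conn, pixelIds, 0, 4, splitIndexes, channelNames,
                          True, mergedIndexes, mergedColours, True,
                          256, 256, imageLabels=[["Control"], ["Treated"]],
                          scalebar=10)
fig.save("splitViewFigure.png", "PNG")               # the returned figure is a PIL Image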
Example #2
def getSplitView(conn,
                 pixelIds,
                 zStart,
                 zEnd,
                 splitIndexes,
                 channelNames,
                 colourChannels,
                 mergedIndexes,
                 mergedColours,
                 width=None,
                 height=None,
                 spacer=12,
                 algorithm=None,
                 stepping=1,
                 scalebar=None,
                 overlayColour=(255, 255, 255)):
    """ This method makes a figure of a number of images, arranged in rows with each row being the split-view
    of a single image. The channels are arranged left to right, with the combined image added on the right.
    The combined image is rendered according to current settings on the server, but it's channels will be
    turned on/off according to @mergedIndexes. 
    No text labels are added to the image at this stage. 
    
    The figure is returned as a PIL 'Image' 
    
    @ session    session for server access
    @ pixelIds        a list of the Ids for the pixels we want to display
    @ zStart        the start of Z-range for projection
    @ zEnd             the end of Z-range for projection
    @ splitIndexes     a list of the channel indexes to display. Same channels for each image/row
    @ channelNames         the Map of index:names to go above the columns for each split channel
    @ colourChannels     the colour to make each column/ channel
    @ mergedIndexes      list or set of channels in the merged image 
    @ mergedColours     index: colour dictionary of channels in the merged image
    @ width            the size in pixels to show each panel
    @ height        the size in pixels to show each panel
    @ spacer        the gap between images and around the figure. Doubled between rows. 
    """

    if algorithm is None:  # omero::constants::projection::ProjectionType
        algorithm = omero.constants.projection.ProjectionType.MAXIMUMINTENSITY
    timepoint = 0
    mode = "RGB"
    white = (255, 255, 255)

    # create a rendering engine
    re = conn.createRenderingEngine()
    queryService = conn.getQueryService()

    rowPanels = []
    totalHeight = 0
    totalWidth = 0
    maxImageWidth = 0

    physicalSizeX = 0

    log("Split View Rendering Log...")

    if zStart > -1 and zEnd > -1:
        alString = str(algorithm).replace("INTENSITY",
                                          " Intensity").capitalize()
        log("All images projected using '%s' projection with step size: %d  start: %d  end: %d"
            % (alString, stepping, zStart + 1, zEnd + 1))
    else:
        log("Images show last-viewed Z-section")

    for row, pixelsId in enumerate(pixelIds):
        log("Rendering row %d" % (row + 1))

        pixels = queryService.get("Pixels", pixelsId)
        sizeX = pixels.getSizeX().getValue()
        sizeY = pixels.getSizeY().getValue()
        sizeZ = pixels.getSizeZ().getValue()
        sizeC = pixels.getSizeC().getValue()

        if pixels.getPhysicalSizeX():
            physicalX = pixels.getPhysicalSizeX().getValue()
        else:
            physicalX = 0
        if pixels.getPhysicalSizeY():
            physicalY = pixels.getPhysicalSizeY().getValue()
        else:
            physicalY = 0
        log("  Pixel size (um): x: %.3f  y: %.3f" % (physicalX, physicalY))
        if row == 0:  # set values for primary image
            physicalSizeX = physicalX
            physicalSizeY = physicalY
        else:  # compare primary image with current one
            if physicalSizeX != physicalX or physicalSizeY != physicalY:
                log(" WARNING: Images have different pixel lengths. Scales are not comparable."
                    )

        log("  Image dimensions (pixels): x: %d  y: %d" % (sizeX, sizeY))
        maxImageWidth = max(maxImageWidth, sizeX)

        # set up rendering engine with the pixels
        re.lookupPixels(pixelsId)
        if not re.lookupRenderingDef(pixelsId):
            re.resetDefaults()
        if not re.lookupRenderingDef(pixelsId):
            raise "Failed to lookup Rendering Def"
        re.load()

        proStart = zStart
        proEnd = zEnd
        # make sure we're within Z range for projection.
        if proEnd >= sizeZ:
            proEnd = sizeZ - 1
            if proStart > sizeZ:
                proStart = 0
            log(" WARNING: Current image has fewer Z-sections than the primary image."
                )

        # if we have an invalid z-range (start or end less than 0), show default Z only
        if proStart < 0 or proEnd < 0:
            proStart = re.getDefaultZ()
            proEnd = proStart
            log("  Display Z-section: %d" % (proEnd + 1))
        else:
            log("  Projecting z range: %d - %d   (max Z is %d)" %
                (proStart + 1, proEnd + 1, sizeZ))

        # turn on channels in mergedIndexes.
        channelMismatch = False
        for i in mergedIndexes:
            if i >= sizeC:
                channelMismatch = True
            else:
                re.setActive(i, True)
                if i in mergedColours:
                    re.setRGBA(i, *mergedColours[i])

        # get the combined image, using the existing rendering settings
        channelsString = ", ".join([channelNames[i] for i in mergedIndexes])
        log("  Rendering merged channels: %s" % channelsString)
        if proStart != proEnd:
            overlay = re.renderProjectedCompressed(algorithm, timepoint,
                                                   stepping, proStart, proEnd)
        else:
            planeDef = omero.romio.PlaneDef()
            planeDef.z = proStart
            planeDef.t = timepoint
            overlay = re.renderCompressed(planeDef)

        # now get each channel in greyscale (or colour)
        # a list of renderedImages (data as Strings) for the split-view row
        renderedImages = []
        # first, turn off all channels in pixels
        for i in range(sizeC):
            re.setActive(i, False)

        # for each channel in the splitview...
        for index in splitIndexes:
            if index >= sizeC:
                channelMismatch = True  # can't turn channel on - simply render black square!
                renderedImages.append(None)
            else:
                re.setActive(index, True)  # turn channel on
                if colourChannels:  # if split channels are coloured...
                    if index in mergedIndexes:  # and this channel is in the combined image
                        if index in mergedColours:
                            rgba = tuple(mergedColours[index])
                            print "Setting channel to color", index, rgba
                            re.setRGBA(index, *rgba)  # set coloured
                        else:
                            mergedColours[index] = re.getRGBA(index)
                    else:
                        re.setRGBA(index, 255, 255, 255,
                                   255)  # otherwise set white (max alpha)
                else:
                    re.setRGBA(
                        index, 255, 255, 255,
                        255)  # if not colourChannels - channels are white
                info = (index, re.getChannelWindowStart(index),
                        re.getChannelWindowEnd(index))
                log("  Render channel: %s  start: %d  end: %d" % info)
                if proStart != proEnd:
                    renderedImg = re.renderProjectedCompressed(
                        algorithm, timepoint, stepping, proStart, proEnd)
                else:
                    planeDef = omero.romio.PlaneDef()
                    planeDef.z = proStart
                    planeDef.t = timepoint
                    renderedImg = re.renderCompressed(planeDef)
                renderedImages.append(renderedImg)
            if index < sizeC:
                re.setActive(index, False)  # turn the channel off again!

        if channelMismatch:
            log(" WARNING channel mismatch: The current image has fewer channels than the primary image."
                )

        # make a canvas for the row of splitview images...
        imageCount = len(renderedImages) + 1  # extra image for combined image
        canvasWidth = ((width + spacer) * imageCount) + spacer
        canvasHeight = spacer + height
        size = (canvasWidth, canvasHeight)
        canvas = Image.new(
            mode, size, white)  # create a canvas of appropriate width, height

        px = spacer
        py = spacer / 2
        col = 0
        # paste the images in
        for img in renderedImages:
            if img is None:
                im = Image.new(mode, (sizeX, sizeY), (0, 0, 0))
            else:
                im = Image.open(StringIO.StringIO(img))
            i = imgUtil.resizeImage(im, width, height)
            imgUtil.pasteImage(i, canvas, px, py)
            px = px + width + spacer
            col = col + 1

        # add combined image, after resizing and adding scale bar
        i = Image.open(StringIO.StringIO(overlay))
        scaledImage = imgUtil.resizeImage(i, width, height)
        if scalebar:
            xIndent = spacer
            yIndent = xIndent
            zoom = imgUtil.getZoomFactor(
                i.size, width,
                height)  # if we've scaled to half size, zoom = 2
            sbar = float(
                scalebar) / zoom  # and the scale bar will be half size
            status, logMsg = figUtil.addScalebar(sbar, xIndent, yIndent,
                                                 scaledImage, pixels,
                                                 overlayColour)
            log(logMsg)

        imgUtil.pasteImage(scaledImage, canvas, px, py)

        totalWidth = max(totalWidth,
                         canvasWidth)  # most should be same width anyway
        totalHeight = totalHeight + canvasHeight  # add together the heights of each row
        rowPanels.append(canvas)

    # make a figure to combine all split-view rows
    # each row has 1/2 spacer above and below the panels. Need extra 1/2 spacer top and bottom
    figureSize = (totalWidth, totalHeight + spacer)
    figureCanvas = Image.new(mode, figureSize, white)

    rowY = spacer / 2
    for row in rowPanels:
        imgUtil.pasteImage(row, figureCanvas, 0, rowY)
        rowY = rowY + row.size[1]

    return figureCanvas
def getROIsplitView(re, pixels, zStart, zEnd, splitIndexes, channelNames,
                    mergedNames, colourChannels, mergedIndexes, mergedColours,
                    roiX, roiY, roiWidth, roiHeight, roiZoom, tIndex, spacer,
                    algorithm, stepping, fontsize, showTopLabels):
    """ This takes a ROI rectangle from an image and makes a split view canvas of the region in the ROI, zoomed 
        by a defined factor. 
        
    @param    re        The OMERO rendering engine. 
    """
    
    if algorithm is None:    # omero::constants::projection::ProjectionType
        algorithm = omero.constants.projection.ProjectionType.MAXIMUMINTENSITY
    mode = "RGB"
    white = (255, 255, 255)    
    
    sizeX = pixels.getSizeX().getValue()
    sizeY = pixels.getSizeY().getValue()
    sizeZ = pixels.getSizeZ().getValue()
    sizeC = pixels.getSizeC().getValue()
    
    if pixels.getPhysicalSizeX():
        physicalX = pixels.getPhysicalSizeX().getValue()
    else:
        physicalX = 0 
    if pixels.getPhysicalSizeY():
        physicalY = pixels.getPhysicalSizeY().getValue()
    else:
        physicalY = 0
    log("  Pixel size (um): x: %.3f  y: %.3f" % (physicalX, physicalY))
    log("  Image dimensions (pixels): x: %d  y: %d" % (sizeX, sizeY))
    
    log(" Projecting ROIs...")
    proStart = zStart
    proEnd = zEnd
    # make sure we're within Z range for projection. 
    if proEnd >= sizeZ:
        proEnd = sizeZ - 1
        if proStart > sizeZ:
            proStart = 0
        log(" WARNING: Current image has fewer Z-sections than the primary image projection.")
    if proStart < 0:
        proStart = 0
    log("  Projecting z range: %d - %d   (max Z is %d)" % (proStart+1, proEnd+1, sizeZ))
    # set up rendering engine with the pixels
    pixelsId = pixels.getId().getValue()
    re.lookupPixels(pixelsId)
    if not re.lookupRenderingDef(pixelsId):
        re.resetDefaults()
    if not re.lookupRenderingDef(pixelsId):
        raise "Failed to lookup Rendering Def"
    re.load()
    
    # if we are missing some merged colours, get them from rendering engine. 
    for index in mergedIndexes:
        if index not in mergedColours:
            color = tuple(re.getRGBA(index))
            mergedColours[index] = color
            print "Adding colour to index", color, index 
    
    # now get each channel in greyscale (or colour)
    # a list of renderedImages (data as Strings) for the split-view row
    renderedImages = []
    panelWidth = 0
    channelMismatch = False
    # first, turn off all channels in pixels
    for i in range(sizeC): 
        re.setActive(i, False)
        
    # for each channel in the splitview...
    for index in splitIndexes:
        if index >= sizeC:
            channelMismatch = True        # can't turn channel on - simply render black square! 
        else:
            re.setActive(index, True)                # turn channel on
            if colourChannels:                            # if split channels are coloured...
                if index in mergedColours:            # and this channel is in the combined image
                    rgba = tuple(mergedColours[index])
                    re.setRGBA(index, *rgba)        # set coloured 
                else:
                    re.setRGBA(index,255,255,255,255)
            else:
                re.setRGBA(index,255,255,255,255)    # if not colourChannels - channels are white
            info = (channelNames[index], re.getChannelWindowStart(index), re.getChannelWindowEnd(index))
            log("  Render channel: %s  start: %d  end: %d" % info)
            box = (roiX, roiY, roiX+roiWidth, roiY+roiHeight)
            if proStart == proEnd:
                # if it's a single plane, we can render a region (region not supported with projection)
                planeDef = omero.romio.PlaneDef()
                planeDef.z = long(proStart)
                planeDef.t = long(tIndex)
                regionDef = omero.romio.RegionDef()
                regionDef.x = roiX
                regionDef.y = roiY
                regionDef.width = roiWidth
                regionDef.height = roiHeight
                planeDef.region = regionDef
                rPlane = re.renderCompressed(planeDef)
                roiImage = Image.open(StringIO.StringIO(rPlane))
            else:
                projection = re.renderProjectedCompressed(algorithm, tIndex, stepping, proStart, proEnd)
                fullImage = Image.open(StringIO.StringIO(projection))
                roiImage = fullImage.crop(box)
                roiImage.load()        # hoping that when we zoom, don't zoom fullImage
            if roiZoom != 1:
                newSize = (int(roiWidth*roiZoom), int(roiHeight*roiZoom))
                roiImage = roiImage.resize(newSize)
            renderedImages.append(roiImage)
            panelWidth = roiImage.size[0]
            re.setActive(index, False)                # turn the channel off again!
            
            
    # turn on channels in mergedIndexes.
    for i in mergedIndexes: 
        if i >= sizeC:
            channelMismatch = True
        else:
            re.setActive(i, True)
            if i in mergedColours:
                rgba = mergedColours[i]
                re.setRGBA(i, *rgba)
                
    # get the combined image, using the existing rendering settings 
    channelsString = ", ".join([str(i) for i in mergedIndexes])
    log("  Rendering merged channels: %s" % channelsString)
    if proStart != proEnd:
        merged = re.renderProjectedCompressed(algorithm, tIndex, stepping, proStart, proEnd)
    else:
        planeDef = omero.romio.PlaneDef()
        planeDef.z = proStart
        planeDef.t = tIndex
        merged = re.renderCompressed(planeDef)
    fullMergedImage = Image.open(StringIO.StringIO(merged))
    roiMergedImage = fullMergedImage.crop(box)
    roiMergedImage.load()    # make sure this is not just a lazy copy of the full image
    if roiZoom != 1:
        newSize = (int(roiWidth*roiZoom), int(roiHeight*roiZoom))
        roiMergedImage = roiMergedImage.resize(newSize)
        
    if channelMismatch:
        log(" WARNING channel mismatch: The current image has fewer channels than the primary image.")
            
    # now assemble the roi split-view canvas
    font = imgUtil.getFont(fontsize)
    textHeight = font.getsize("Textq")[1]
    topSpacer = 0
    if showTopLabels: 
        if mergedNames:
            topSpacer = (textHeight * len(mergedIndexes)) + spacer
        else:
            topSpacer = textHeight + spacer
    imageCount = len(renderedImages) + 1     # extra image for merged image
    canvasWidth = ((panelWidth + spacer) * imageCount) - spacer    # no spaces around panels
    canvasHeight = renderedImages[0].size[1] + topSpacer
    size = (canvasWidth, canvasHeight)
    canvas = Image.new(mode, size, white)        # create a canvas of appropriate width, height
    
    px = 0
    textY = topSpacer - textHeight - spacer/2
    panelY = topSpacer
    # paste the split images in, with channel labels
    draw = ImageDraw.Draw(canvas)
    print "mergedColours", mergedColours
    for i, index in enumerate(splitIndexes):
        label = channelNames[index]
        indent = (panelWidth - (font.getsize(label)[0])) / 2
        # text is coloured if channel is not coloured AND in the merged image
        rgb = (0,0,0)
        if index in mergedColours:
            if not colourChannels:
                rgb = tuple(mergedColours[index])
                if rgb == (255,255,255,255):    # if white (unreadable), needs to be black! 
                    rgb = (0,0,0)
        if showTopLabels: draw.text((px+indent, textY), label, font=font, fill=rgb)
        if i < len(renderedImages):
            imgUtil.pasteImage(renderedImages[i], canvas, px, panelY)
        px = px + panelWidth + spacer
    # and the merged image
    if showTopLabels:
        #indent = (panelWidth - (font.getsize("Merged")[0])) / 2
        #draw.text((px+indent, textY), "Merged", font=font, fill=(0,0,0))
        if (mergedNames):
            for index in mergedIndexes:
                if index in mergedColours: 
                    rgb = tuple(mergedColours[index])
                    if rgb == (255,255,255,255): rgb = (0,0,0)
                else: rgb = (0,0,0) 
                if index in channelNames: name = channelNames[index]
                else: name = str(index) 
                combTextWidth = font.getsize(name)[0]
                inset = int((panelWidth - combTextWidth) / 2)
                draw.text((px + inset, textY), name, font=font, fill=rgb)
                textY = textY - textHeight  
        else:
            combTextWidth = font.getsize("Merged")[0]
            inset = int((panelWidth - combTextWidth) / 2)
            draw.text((px + inset, textY), "Merged", font=font, fill=(0,0,0))
    imgUtil.pasteImage(roiMergedImage, canvas, px, panelY)
    
    # return the roi splitview canvas, as well as the full merged image
    return (canvas, fullMergedImage, panelY)
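
This helper expects a rendering engine and a Pixels object rather than a connection. A hedged sketch of how a caller might obtain them and invoke it (the Pixels ID, channel maps and ROI rectangle are placeholders; the same set-up pattern appears in the other examples here):

# Hypothetical caller set-up, mirroring getSplitView above
re = conn.createRenderingEngine()
queryService = conn.getQueryService()
pixels = queryService.get("Pixels", 101)             # placeholder Pixels ID

roiCanvas, fullMerged, topSpacer = getROIsplitView(
    re, pixels, zStart=0, zEnd=4,
    splitIndexes=[0, 1], channelNames={0: "DAPI", 1: "GFP"}, mergedNames=True,
    colourChannels=True, mergedIndexes=[0, 1],
    mergedColours={0: (0, 0, 255, 255), 1: (0, 255, 0, 255)},
    roiX=100, roiY=80, roiWidth=64, roiHeight=64, roiZoom=2, tIndex=0,
    spacer=12, algorithm=None, stepping=1, fontsize=12, showTopLabels=True)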
def makeThumbnailFigure(conn, scriptParams):    
    """
    Makes the figure using the parameters in @scriptParams, attaches the figure to the
    parent Project/Dataset, and returns the file annotation.

    @ returns        The FileAnnotation omero.model object (fa._obj)
    """
        
    log("Thumbnail figure created by OMERO")
    log("")

    parent = None        # figure will be attached to this object 

    imageIds = []
    datasetIds = []
    
    dataType = scriptParams["Data_Type"]
    if dataType == "Image":
        imageIds = scriptParams["IDs"]
        if "Parent_ID" in scriptParams and len(imageIds) > 1:
            pId = scriptParams["Parent_ID"]
            parent = conn.getObject("Dataset", pId)
            if parent:
                log("Figure will be linked to Dataset: %s" % parent.getName())
        if parent is None:
            parent = conn.getObject("Image", imageIds[0])
            if parent:
                log("Figure will be linked to Image: %s" % parent.getName())
                
    else:   # Dataset
        datasetIds = scriptParams["IDs"]
        if "Parent_ID" in scriptParams and len(datasetIds) > 1:
            pId = scriptParams["Parent_ID"]
            parent = conn.getObject("Project", pId)
            if parent:
                log("Figure will be linked to Project: %s" % parent.getName().getValue())
        if parent is None:
            parent = conn.getObject("Dataset", datasetIds[0])
            if parent:
                log("Figure will be linked to Dataset: %s" % parent.getName())
    
    if len(imageIds) == 0 and len(datasetIds) == 0:
        print "No image IDs or dataset IDs found"       
    
    tagIds = []
    if "Tag_IDs" in scriptParams:
        tagIds = scriptParams['Tag_IDs']
    if len(tagIds) == 0:
        tagIds = None
        
    showUntagged = False
    if (tagIds):
        showUntagged = scriptParams["Show_Untagged_Images"]

    thumbSize = scriptParams["Thumbnail_Size"]
    maxColumns = scriptParams["Max_Columns"]

    figHeight = 0
    figWidth = 0
    dsCanvases = []

    for datasetId in datasetIds:
        dataset = conn.getObject("Dataset", datasetId)
        if dataset is None:
            log("No dataset found for ID: %s" % datasetId)
            continue
        datasetName = dataset.getName()
        images = list(dataset.listChildren())
        log("Dataset: %s     ID: %d" % (datasetName, datasetId))
        dsCanvas = paintDatasetCanvas(conn, images, datasetName, tagIds, showUntagged, length=thumbSize, colCount=maxColumns)
        if dsCanvas is None:
            continue
        dsCanvases.append(dsCanvas)
        figHeight += dsCanvas.size[1]
        figWidth = max(figWidth, dsCanvas.size[0])
        
    if len(datasetIds) == 0:
        images = list(conn.getObjects("Image", imageIds))
        imageCanvas = paintDatasetCanvas(conn, images, "", tagIds, showUntagged, length=thumbSize, colCount=maxColumns)
        dsCanvases.append(imageCanvas)
        figHeight += imageCanvas.size[1]
        figWidth = max(figWidth, imageCanvas.size[0])
    
    if len(dsCanvases) == 0:
        return None
    figure = Image.new("RGB", (figWidth, figHeight), WHITE)
    y = 0
    for ds in dsCanvases:
        imgUtil.pasteImage(ds, figure, 0, y)
        y += ds.size[1]
    
    
    log("")
    figLegend = "\n".join(logLines)
    
    format = scriptParams["Format"]
    output = scriptParams["Figure_Name"]
        
    if format == 'PNG':
        output = output + ".png"
        figure.save(output, "PNG")
        mimetype = "image/png"
    else:
        output = output + ".jpg"
        figure.save(output)
        mimetype = "image/jpeg"

    fa = conn.createFileAnnfromLocalFile(output, origFilePathAndName=None, mimetype=mimetype, ns=None, desc=figLegend)
    parent.linkAnnotation(fa)
    
    return fa._obj
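
A sketch of the script-parameter dictionary this function expects; the key names are taken from the code above, while the IDs and values are placeholders (a real script would normally build this dict from omero.scripts inputs):

# Hypothetical parameter dict, mirroring the keys read in makeThumbnailFigure
scriptParams = {
    "Data_Type": "Dataset",          # or "Image"
    "IDs": [51, 52],                 # Dataset (or Image) IDs - placeholders
    "Parent_ID": 12,                 # optional: Project (or Dataset) to attach the figure to
    "Tag_IDs": [301, 302],           # optional: group thumbnails by these tags
    "Show_Untagged_Images": True,    # only read when Tag_IDs is given
    "Thumbnail_Size": 100,
    "Max_Columns": 10,
    "Format": "PNG",                 # anything else falls back to JPEG
    "Figure_Name": "thumbnailFigure",
}
fileAnnotation = makeThumbnailFigure(conn, scriptParams)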
Example #5
def getSplitView(conn, imageIds, pixelIds, splitIndexes, channelNames,
                 mergedNames, colourChannels, mergedIndexes, mergedColours,
                 width, height, imageLabels, spacer, algorithm, stepping,
                 scalebar, overlayColour, roiZoom, roiLabel):

    """
    This method makes a figure of a number of images, arranged in rows with
    each row being the split-view of a single image. The channels are arranged
    left to right, with the combined image added on the right.
    The combined image is rendered according to current settings on the
    server, but its channels will be turned on/off according to
    @mergedIndexes. The Z-range and T-index for each row are taken from that
    image's ROI.

    The figure is returned as a PIL 'Image'

    @ conn              connection for server access
    @ pixelIds          a list of the Ids for the pixels we want to display
    @ splitIndexes      a list of the channel indexes to display. Same
                        channels for each image/row
    @ channelNames      the Map of index:names for all channels
    @ colourChannels    true if the split channels should be shown in colour
                        (otherwise white)
    @ mergedIndexes     list or set of channels in the merged image
    @ mergedColours     index: colour dictionary of channels in the merged
                        image
    @ width             the size in pixels to show each panel
    @ height            the size in pixels to show each panel
    @ spacer            the gap between images and around the figure. Doubled
                        between rows.
    """

    roiService = conn.getRoiService()
    re = conn.createRenderingEngine()
    queryService = conn.getQueryService()

    # establish dimensions and roiZoom for the primary image
    # getTheseValues from the server
    rect = getRectangle(roiService, imageIds[0], roiLabel)
    if rect is None:
        raise Exception("No ROI found for the first image.")
    roiX, roiY, roiWidth, roiHeight, yMin, yMax, tMin, tMax = rect

    roiOutline = ((max(width, height)) / 200) + 1

    if roiZoom is None:
        # get the pixels for the primary image.
        pixels = queryService.get("Pixels", pixelIds[0])
        sizeY = pixels.getSizeY().getValue()

        roiZoom = float(height) / float(roiHeight)
        log("ROI zoom set by primary image is %F X" % roiZoom)
    else:
        log("ROI zoom: %F X" % roiZoom)

    textGap = spacer/3
    fontsize = 12
    if width > 500:
        fontsize = 48
    elif width > 400:
        fontsize = 36
    elif width > 300:
        fontsize = 24
    elif width > 200:
        fontsize = 16
    font = imgUtil.getFont(fontsize)
    textHeight = font.getsize("Textq")[1]
    maxCount = 0
    for row in imageLabels:
        maxCount = max(maxCount, len(row))
    leftTextWidth = (textHeight + textGap) * maxCount + spacer

    maxSplitPanelWidth = 0
    totalcanvasHeight = 0
    mergedImages = []
    roiSplitPanes = []
    topSpacers = []         # space for labels above each row

    showLabelsAboveEveryRow = False
    invalidImages = []      # note any image row indexes that don't have ROIs.

    for row, pixelsId in enumerate(pixelIds):
        log("Rendering row %d" % (row))

        if showLabelsAboveEveryRow:
            showTopLabels = True
        else:
            showTopLabels = (row == 0)  # only show top labels for first row

        # need to get the roi dimensions from the server
        imageId = imageIds[row]
        roi = getRectangle(roiService, imageId, roiLabel)
        if roi is None:
            log("No Rectangle ROI found for this image")
            invalidImages.append(row)
            continue
        roiX, roiY, roiWidth, roiHeight, zMin, zMax, tStart, tEnd = roi

        pixels = queryService.get("Pixels", pixelsId)
        sizeX = pixels.getSizeX().getValue()
        sizeY = pixels.getSizeY().getValue()

        zStart = zMin
        zEnd = zMax

        # work out if any additional zoom is needed (if the full-sized image
        # is different size from primary image)
        fullSize = (sizeX, sizeY)
        imageZoom = imgUtil.getZoomFactor(fullSize, width, height)
        if imageZoom != 1.0:
            log("  Scaling down the full-size image by a factor of %F"
                % imageZoom)

        log("  ROI location (top-left) x: %d  y: %d  and size width:"
            " %d  height: %d" % (roiX, roiY, roiWidth, roiHeight))
        log("  ROI time: %d - %d   zRange: %d - %d"
            % (tStart+1, tEnd+1, zStart+1, zEnd+1))
        # get the split pane and full merged image
        roiSplitPane, fullMergedImage, topSpacer = getROIsplitView(
            re, pixels, zStart, zEnd, splitIndexes, channelNames, mergedNames,
            colourChannels, mergedIndexes, mergedColours, roiX, roiY,
            roiWidth, roiHeight, roiZoom, tStart, spacer, algorithm, stepping,
            fontsize, showTopLabels)

        # and now zoom the full-sized merged image, add scalebar
        mergedImage = imgUtil.resizeImage(fullMergedImage, width, height)
        if scalebar:
            xIndent = spacer
            yIndent = xIndent
            # and the scale bar will be half size
            sbar = float(scalebar) / imageZoom
            status, logMsg = figUtil.addScalebar(
                sbar, xIndent, yIndent, mergedImage, pixels, overlayColour)
            log(logMsg)

        # draw ROI onto mergedImage...
        # recalculate roi if the image has been zoomed
        x = roiX / imageZoom
        y = roiY / imageZoom
        roiX2 = (roiX + roiWidth) / imageZoom
        roiY2 = (roiY + roiHeight) / imageZoom
        drawRectangle(
            mergedImage, x, y, roiX2, roiY2, overlayColour, roiOutline)

        # note the maxWidth of zoomed panels and total height for row
        maxSplitPanelWidth = max(maxSplitPanelWidth, roiSplitPane.size[0])
        totalcanvasHeight += spacer + max(height+topSpacer,
                                          roiSplitPane.size[1])

        mergedImages.append(mergedImage)
        roiSplitPanes.append(roiSplitPane)
        topSpacers.append(topSpacer)

    # remove the labels for the invalid images (without ROIs)
    invalidImages.reverse()
    for row in invalidImages:
        del imageLabels[row]

    # make a figure to combine all split-view rows
    # each row has 1/2 spacer above and below the panels. Need extra 1/2
    # spacer top and bottom
    canvasWidth = leftTextWidth + width + spacer + maxSplitPanelWidth + spacer
    figureSize = (canvasWidth, totalcanvasHeight + spacer)
    figureCanvas = Image.new("RGB", figureSize, (255, 255, 255))

    rowY = spacer
    for row, image in enumerate(mergedImages):
        labelCanvas = figUtil.getVerticalLabels(imageLabels[row], font,
                                                textGap)
        vOffset = (image.size[1] - labelCanvas.size[1]) / 2
        imgUtil.pasteImage(labelCanvas, figureCanvas, spacer/2,
                           rowY + topSpacers[row] + vOffset)
        imgUtil.pasteImage(
            image, figureCanvas, leftTextWidth, rowY + topSpacers[row])
        x = leftTextWidth + width + spacer
        imgUtil.pasteImage(roiSplitPanes[row], figureCanvas, x, rowY)
        rowY = rowY + max(image.size[1] + topSpacers[row],
                          roiSplitPanes[row].size[1]) + spacer

    return figureCanvas
def addLeftLabels(panelCanvas, imageLabels, rowIndex, width, spacer):
    """
    Takes a canvas of panels and adds one or more labels to the left,
    with the text aligned vertically.
    NB: We are passed the set of labels for ALL image panels (as well as the
    index of the current image panel) so that we know what is the max label
    count and can give all panels the same margin on the left.

    @param panelCanvas:     PIL image - add labels to the left of this
    @param imageLabels:     A series of label lists, one per image. We only
                            add labels from one list
    @param rowIndex:        The index of the label list we're going to use
                            from imageLabels
    @param width:           Simply used for finding a suitable font size
    @param spacer:          Space between panels
    """

    # add labels to row...
    mode = "RGB"
    white = (255, 255, 255)
    font = imgUtil.getFont(width/12)
    textHeight = font.getsize("Sampleq")[1]
    textGap = spacer / 2
    # rowSpacing = panelCanvas.size[1]/len(pixelIds)

    # find max number of labels
    maxCount = 0
    for row in imageLabels:
        maxCount = max(maxCount, len(row))
    leftTextHeight = (textHeight + textGap) * maxCount
    # make the canvas as wide as the panels height
    leftTextWidth = panelCanvas.size[1]
    size = (leftTextWidth, leftTextHeight)
    textCanvas = Image.new(mode, size, white)
    textdraw = ImageDraw.Draw(textCanvas)

    labels = imageLabels[rowIndex]
    py = leftTextHeight - textGap  # start at bottom
    for l, label in enumerate(labels):
        py = py - textHeight    # find the top of this row
        w = textdraw.textsize(label, font=font)[0]
        inset = int((leftTextWidth - w) / 2)
        textdraw.text((inset, py), label, font=font, fill=(0, 0, 0))
        py = py - textGap    # add space between rows

    # make a canvas big-enough to add text to the images.
    canvasWidth = leftTextHeight + panelCanvas.size[0]
    # TextHeight will be width once rotated
    canvasHeight = panelCanvas.size[1]
    size = (canvasWidth, canvasHeight)
    # create a canvas of appropriate width, height
    canvas = Image.new(mode, size, white)

    # add the panels to the canvas
    pasteX = leftTextHeight
    pasteY = 0
    imgUtil.pasteImage(panelCanvas, canvas, pasteX, pasteY)

    # add text to rows
    # want it to be vertical. Rotate and paste the text canvas from above
    if imageLabels:
        textV = textCanvas.rotate(90)
        imgUtil.pasteImage(textV, canvas, spacer/2, 0)

    return canvas
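
A small usage sketch with hypothetical inputs: panelCanvas can be any PIL image containing a row of panels, each inner list of imageLabels holds the labels for one image row, and only the list at rowIndex is drawn here.

# Hypothetical: label the first row of a pre-built canvas of panels
from PIL import Image
panelCanvas = Image.new("RGB", (800, 200), (255, 255, 255))
imageLabels = [["Wild type", "t=0-30s"], ["Mutant"]]   # one label list per image row
labelled = addLeftLabels(panelCanvas, imageLabels, rowIndex=0, width=256, spacer=12)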
Example #7
def paintDatasetCanvas(conn, images, title, tagIds=None, showUntagged=False,
                       colCount=10, length=100):
    """
        Paints and returns a canvas of thumbnails from images, laid out in a
        set number of columns.
        The title is printed above the thumbnails, to the left.

        @param conn:            Blitz connection
        @param images:          list of Image objects to make thumbnails of
        @param title:           title to display at top of figure. String
        @param tagIds:          Optional to sort thumbnails by tag. [long]
        @param showUntagged:    If true, also show thumbnails of untagged images
        @param colCount:        Max number of columns to lay out thumbnails
        @param length:          Length of longest side of thumbnails
    """

    mode = "RGB"
    figCanvas = None
    spacing = length/40 + 2

    thumbnailStore = conn.createThumbnailStore()
    # returns  omero.api.ThumbnailStorePrx
    metadataService = conn.getMetadataService()

    if len(images) == 0:
        return None
    timestampMin = images[0].getDate()   # datetime
    timestampMax = timestampMin

    dsImageIds = []
    imagePixelMap = {}
    imageNames = {}

    # sort the images by name
    images.sort(key=lambda x: (x.getName().lower()))

    for image in images:
        imageId = image.getId()
        pixelId = image.getPrimaryPixels().getId()
        name = image.getName()
        dsImageIds.append(imageId)        # make a list of image-IDs
        imagePixelMap[imageId] = pixelId    # and a map of image-ID: pixel-ID
        imageNames[imageId] = name
        timestampMin = min(timestampMin, image.getDate())
        timestampMax = max(timestampMax, image.getDate())

    # set-up fonts
    fontsize = length/7 + 5
    font = imgUtil.getFont(fontsize)
    textHeight = font.getsize("Textq")[1]
    topSpacer = spacing + textHeight
    leftSpacer = spacing + textHeight

    tagPanes = []
    maxWidth = 0
    totalHeight = topSpacer

    # if we have a list of tags, then sort images by tag
    if tagIds:
        # Cast to int since List can be any type
        tagIds = [int(tagId) for tagId in tagIds]
        log(" Sorting images by tags: %s" % tagIds)
        tagNames = {}
        taggedImages = {}    # a map of tagId: list-of-image-Ids
        imgTags = {}        # a map of imgId: list-of-tagIds
        for tagId in tagIds:
            taggedImages[tagId] = []

        # look for images that have a tag
        types = ["ome.model.annotations.TagAnnotation"]
        annotations = metadataService.loadAnnotations(
            "Image", dsImageIds, types, None, None)
        # filter images by annotation...
        for imageId, tags in annotations.items():
            imgTagIds = []
            for tag in tags:
                tagId = tag.getId().getValue()
                # make a dict of tag-names
                tagNames[tagId] = tag.getTextValue().getValue()
                print "     Tag:", tagId, tagId in tagIds
                imgTagIds.append(tagId)
            imgTags[imageId] = imgTagIds

        # get a sorted list of {'iid': iid, 'tagKey': tagKey,
        # 'tagIds':orderedTags}
        sortedThumbs = sortImagesByTag(tagIds, imgTags)

        if not showUntagged:
            sortedThumbs = [t for t in sortedThumbs if len(t['tagIds']) > 0]

        # Need to group sets of thumbnails by FIRST tag.
        toptagSets = []
        groupedPixelIds = []
        showSubsetLabels = False
        currentTagStr = None
        for i, img in enumerate(sortedThumbs):
            tagIds = img['tagIds']
            if len(tagIds) == 0:
                tagString = "Not Tagged"
            else:
                tagString = tagNames[tagIds[0]]
            if tagString == currentTagStr or currentTagStr is None:
                # only show subset labels (later) if there are more than 1
                # subset
                if (len(tagIds) > 1):
                    showSubsetLabels = True
                groupedPixelIds.append({
                    'pid': imagePixelMap[img['iid']],
                    'tagIds': tagIds})
            else:
                toptagSets.append({
                    'tagText': currentTagStr,
                    'pixelIds': groupedPixelIds,
                    'showSubsetLabels': showSubsetLabels})
                showSubsetLabels = len(tagIds) > 1
                groupedPixelIds = [{
                    'pid': imagePixelMap[img['iid']],
                    'tagIds': tagIds}]
            currentTagStr = tagString
        toptagSets.append({
            'tagText': currentTagStr,
            'pixelIds': groupedPixelIds,
            'showSubsetLabels': showSubsetLabels})

        # Find the indent we need
        maxTagNameWidth = max([font.getsize(ts['tagText'])[0]
                               for ts in toptagSets])
        if showUntagged:
            maxTagNameWidth = max(maxTagNameWidth,
                                  font.getsize("Not Tagged")[0])

        print "toptagSets", toptagSets

        tagSubPanes = []

        # make a canvas for each tag combination
        def makeTagsetCanvas(tagString, tagsetPixIds, showSubsetLabels):
            log(" Tagset: %s  (contains %d images)"
                % (tagString, len(tagsetPixIds)))
            if not showSubsetLabels:
                tagString = None
            subCanvas = imgUtil.paintThumbnailGrid(
                thumbnailStore, length,
                spacing, tagsetPixIds, colCount, topLabel=tagString)
            tagSubPanes.append(subCanvas)

        for toptagSet in toptagSets:
            tagText = toptagSet['tagText']
            showSubsetLabels = toptagSet['showSubsetLabels']
            imageData = toptagSet['pixelIds']
            # loop through all thumbs under TAG, grouping into subsets.
            tagsetPixIds = []
            currentTagStr = None
            for i, img in enumerate(imageData):
                tag_ids = img['tagIds']
                pid = img['pid']
                tagString = ", ".join([tagNames[tid] for tid in tag_ids])
                if tagString == "":
                    tagString = "Not Tagged"
                # Keep grouping thumbs under similar tag set (if not on the
                # last loop)
                if tagString == currentTagStr or currentTagStr is None:
                    tagsetPixIds.append(pid)
                else:
                    # Process thumbs added so far
                    makeTagsetCanvas(currentTagStr, tagsetPixIds,
                                     showSubsetLabels)
                    # reset for next tagset
                    tagsetPixIds = [pid]
                currentTagStr = tagString

            makeTagsetCanvas(currentTagStr, tagsetPixIds, showSubsetLabels)

            maxWidth = max([c.size[0] for c in tagSubPanes])
            totalHeight = sum([c.size[1] for c in tagSubPanes])

            # paste them into a single canvas for each Tag

            leftSpacer = spacing + maxTagNameWidth + 2*spacing
            # Draw vertical line to right
            size = (leftSpacer + maxWidth, totalHeight)
            tagCanvas = Image.new(mode, size, WHITE)
            pX = leftSpacer
            pY = 0
            for pane in tagSubPanes:
                imgUtil.pasteImage(pane, tagCanvas, pX, pY)
                pY += pane.size[1]
            if tagText is not None:
                draw = ImageDraw.Draw(tagCanvas)
                tt_w, tt_h = font.getsize(tagText)
                h_offset = (totalHeight - tt_h)/2
                draw.text((spacing, h_offset), tagText, font=font,
                          fill=(50, 50, 50))
            # draw vertical line
            draw.line((leftSpacer-spacing, 0, leftSpacer - spacing,
                       totalHeight), fill=(0, 0, 0))
            tagPanes.append(tagCanvas)
            tagSubPanes = []
    else:
        leftSpacer = spacing
        pixelIds = []
        for imageId in dsImageIds:
            log("  Name: %s  ID: %d" % (imageNames[imageId], imageId))
            pixelIds.append(imagePixelMap[imageId])
        figCanvas = imgUtil.paintThumbnailGrid(
            thumbnailStore, length, spacing, pixelIds, colCount)
        tagPanes.append(figCanvas)

    # paste them into a single canvas
    tagsetSpacer = length / 3
    maxWidth = max([c.size[0] for c in tagPanes])
    totalHeight = totalHeight + sum([c.size[1]+tagsetSpacer
                                     for c in tagPanes]) - tagsetSpacer
    size = (maxWidth, totalHeight)
    fullCanvas = Image.new(mode, size, WHITE)
    pX = 0
    pY = topSpacer
    for pane in tagPanes:
        imgUtil.pasteImage(pane, fullCanvas, pX, pY)
        pY += pane.size[1] + tagsetSpacer

    # create dates for the image timestamps. If dates are not the same, show
    # first - last.
    # firstdate = timestampMin
    # lastdate = timestampMax
    # figureDate = str(firstdate)
    # if firstdate != lastdate:
    #     figureDate = "%s - %s" % (firstdate, lastdate)

    draw = ImageDraw.Draw(fullCanvas)
    # dateWidth = draw.textsize(figureDate, font=font)[0]
    # titleWidth = draw.textsize(title, font=font)[0]
    dateY = spacing
    # dateX = fullCanvas.size[0] - spacing - dateWidth
    draw.text((leftSpacer, dateY), title, font=font, fill=(0, 0, 0))  # title
    # Don't show dates: see
    # https://github.com/openmicroscopy/openmicroscopy/pull/1002
    # if (leftSpacer+titleWidth) < dateX:
    # if there's enough space...
    #     draw.text((dateX, dateY), figureDate, font=font, fill=(0,0,0))
    # add date

    return fullCanvas
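
A usage sketch along the lines of how makeThumbnailFigure calls this function above (the dataset ID and output filename are placeholders; conn is an existing BlitzGateway connection):

# Hypothetical: paint thumbnails for every image in one dataset
dataset = conn.getObject("Dataset", 51)              # placeholder ID
images = list(dataset.listChildren())
canvas = paintDatasetCanvas(conn, images, dataset.getName(),
                            tagIds=None, showUntagged=False,
                            colCount=8, length=100)
if canvas is not None:
    canvas.save("datasetThumbnails.png", "PNG")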
Example #8
def getROImovieView(re, queryService, pixels, timeShapeMap, mergedIndexes,
                    mergedColours, roiWidth, roiHeight, roiZoom, spacer=12,
                    algorithm=None, stepping=1, fontsize=24, maxColumns=None,
                    showRoiDuration=False):

    """
    This takes a ROI rectangle from an image and makes a movie canvas of the
    region in the ROI, zoomed by a defined factor.
    """

    mode = "RGB"
    white = (255, 255, 255)

    sizeX = pixels.getSizeX().getValue()
    sizeY = pixels.getSizeY().getValue()
    sizeZ = pixels.getSizeZ().getValue()
    sizeC = pixels.getSizeC().getValue()
    sizeT = pixels.getSizeT().getValue()

    if pixels.getPhysicalSizeX():
        physicalX = pixels.getPhysicalSizeX().getValue()
    else:
        physicalX = 0
    if pixels.getPhysicalSizeY():
        physicalY = pixels.getPhysicalSizeY().getValue()
    else:
        physicalY = 0
    log("  Pixel size (um): x: %s  y: %s" % (str(physicalX), str(physicalY)))
    log("  Image dimensions (pixels): x: %d  y: %d" % (sizeX, sizeY))
    log(" Projecting Movie Frame ROIs...")

    # set up rendering engine with the pixels
    pixelsId = pixels.getId().getValue()
    re.lookupPixels(pixelsId)
    if not re.lookupRenderingDef(pixelsId):
        re.resetDefaults()
    if not re.lookupRenderingDef(pixelsId):
        raise "Failed to lookup Rendering Def"
    re.load()

    # now get each channel in greyscale (or colour)
    # a list of renderedImages (data as Strings) for the split-view row
    renderedImages = []
    panelWidth = 0
    channelMismatch = False
    # first, turn off all channels in pixels
    for i in range(sizeC):
        re.setActive(i, False)

    # turn on channels in mergedIndexes.
    for i in mergedIndexes:
        if i >= sizeC or i < 0:
            channelMismatch = True
        else:
            print "Turning on channel:", i
            re.setActive(i, True)
            if i in mergedColours:
                rgba = mergedColours[i]
                print "Setting rgba", rgba
                re.setRGBA(i, *rgba)

    # get the combined image, using the existing rendering settings
    channelsString = ", ".join([str(i) for i in mergedIndexes])
    log("  Rendering Movie channels: %s" % channelsString)

    timeIndexes = list(timeShapeMap.keys())
    timeIndexes.sort()

    if showRoiDuration:
        log(" Timepoints shown are ROI duration, not from start of movie")
    timeLabels = figUtil.getTimeLabels(
        queryService, pixelsId, timeIndexes, sizeT, None, showRoiDuration)
    # The last value of the list will be the Units used to display time
    print "Time label units are:", timeLabels[-1]

    fullFirstFrame = None
    for t, timepoint in enumerate(timeIndexes):
        roiX, roiY, proStart, proEnd = timeShapeMap[timepoint]
        box = (roiX, roiY, int(roiX+roiWidth), int(roiY+roiHeight))
        log("  Time-index: %d Time-label: %s  Projecting z range: %d - %d "
            "(max Z is %d) of region x: %s y: %s"
            % (timepoint+1, timeLabels[t], proStart+1, proEnd+1, sizeZ, roiX,
               roiY))

        merged = re.renderProjectedCompressed(
            algorithm, timepoint, stepping, proStart, proEnd)
        fullMergedImage = Image.open(StringIO.StringIO(merged))
        if fullFirstFrame is None:
            fullFirstFrame = fullMergedImage
        roiMergedImage = fullMergedImage.crop(box)
        # make sure this is not just a lazy copy of the full image
        roiMergedImage.load()
        if roiZoom != 1:
            newSize = (int(roiWidth*roiZoom), int(roiHeight*roiZoom))
            roiMergedImage = roiMergedImage.resize(newSize)
        panelWidth = roiMergedImage.size[0]
        renderedImages.append(roiMergedImage)

    if channelMismatch:
        log(" WARNING channel mismatch: The current image has fewer channels"
            " than the primary image.")

    # now assemble the roi split-view canvas, with space above for text
    colCount = len(renderedImages)
    rowCount = 1
    if maxColumns:
        rowCount = colCount / maxColumns
        if (colCount % maxColumns) > 0:
            rowCount += 1
        colCount = maxColumns
    font = imgUtil.getFont(fontsize)
    textHeight = font.getsize("Textq")[1]
    # no spaces around panels
    canvasWidth = ((panelWidth + spacer) * colCount) - spacer
    rowHeight = renderedImages[0].size[1] + spacer + textHeight
    canvasHeight = rowHeight * rowCount
    size = (canvasWidth, canvasHeight)
    # create a canvas of appropriate width, height
    canvas = Image.new(mode, size, white)

    px = 0
    textY = spacer/2
    panelY = textHeight + spacer
    # paste the images in, with time labels
    draw = ImageDraw.Draw(canvas)

    col = 0
    for i, img in enumerate(renderedImages):
        label = timeLabels[i]
        indent = (panelWidth - (font.getsize(label)[0])) / 2
        draw.text((px+indent, textY), label, font=font, fill=(0, 0, 0))
        imgUtil.pasteImage(img, canvas, px, panelY)
        if col == (colCount - 1):
            col = 0
            px = 0
            textY += rowHeight
            panelY += rowHeight
        else:
            col += 1
            px = px + panelWidth + spacer

    # return the roi splitview canvas, as well as the full merged image
    return (canvas, fullFirstFrame, textHeight + spacer)
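
For clarity, the shape of timeShapeMap is inferred from how the function unpacks it above: keys are the T-indexes at which the ROI shape exists, and each value is (x, y, zStart, zEnd) for that timepoint. A hypothetical map and call might look like this (the Pixels ID and ROI values are placeholders, and the module-level "import omero" used by these scripts is assumed):

# Hypothetical caller set-up
re = conn.createRenderingEngine()
queryService = conn.getQueryService()
pixels = queryService.get("Pixels", 101)             # placeholder Pixels ID

# ROI position and Z-range per timepoint, as unpacked by getROImovieView
timeShapeMap = {
    0: (120, 80, 2, 6),
    3: (128, 84, 2, 6),
    7: (140, 90, 3, 7),
}
movieCanvas, firstFrame, topGap = getROImovieView(
    re, queryService, pixels, timeShapeMap,
    mergedIndexes=[0, 1], mergedColours={0: (255, 0, 0, 255)},
    roiWidth=64, roiHeight=64, roiZoom=2,
    algorithm=omero.constants.projection.ProjectionType.MAXIMUMINTENSITY,
    maxColumns=4, showRoiDuration=True)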
Example #9
def createMovieFigure(conn, pixelIds, tIndexes, zStart, zEnd, width, height,
                      spacer, algorithm, stepping, scalebar, overlayColour,
                      timeUnits, imageLabels):
    """
    Makes the complete Movie figure: a canvas showing an image per row with multiple columns showing
    frames from each image/movie. Labels above each frame show the time-stamp of that frame in the
    specified units, and labels on the left name each image.

    @param conn            The BlitzGateway connection
    @param pixelIds        A list of the Pixels IDs for the images in the figure
    @param tIndexes        A list of T-indexes to display frames from
    @param zStart          Projection Z-start
    @param zEnd            Projection Z-end
    @param width           Maximum width of panels
    @param height          Max height of panels
    @param spacer          Space between panels
    @param algorithm       Projection algorithm e.g. "MAXIMUMINTENSITY"
    @param stepping        Projection z-step
    @param scalebar        A number of microns for the scale-bar
    @param overlayColour   Colour of the scale-bar as a tuple (255, 255, 255)
    @param timeUnits       A string such as "SECS"
    @param imageLabels     A list of lists, corresponding to pixelIds, for labelling each image with
                           one or more strings.
    """

    panelCanvas = getImageFrames(conn, pixelIds, tIndexes, zStart, zEnd, width, height, spacer,
                            algorithm, stepping, scalebar, overlayColour, timeUnits)
                    
    # add labels to each row...
    mode = "RGB"
    white = (255,255,255)
    font = imgUtil.getFont(width/12)
    textHeight = font.getsize("Sampleq")[1]
    textGap = spacer /2
    rowSpacing = panelCanvas.size[1]/len(pixelIds)
    
    # find max number of labels
    maxCount = 0 
    rowHeights = []
    for row in imageLabels:
        maxCount = max(maxCount, len(row))
    leftTextWidth = (textHeight + textGap) * maxCount
    size = (panelCanvas.size[1], leftTextWidth)    # make the canvas as wide as the panels height
    textCanvas = Image.new(mode, size, white)
    textdraw = ImageDraw.Draw(textCanvas)
    px = spacer
    imageLabels.reverse()
    for row in imageLabels:
        py = leftTextWidth - textGap # start at bottom
        for l, label in enumerate(row):
            py = py - textHeight    # find the top of this row
            w = textdraw.textsize(label, font=font) [0]
            inset = int((height - w) / 2)
            textdraw.text((px+inset, py), label, font=font, fill=(0,0,0))
            py = py - textGap    # add space between rows
        px = px + rowSpacing         # move to the next row (text canvas is rotated to vertical below)
        

    # make a canvas big-enough to add text to the images. 
    canvasWidth = leftTextWidth + panelCanvas.size[0]
    canvasHeight = panelCanvas.size[1]
    size = (canvasWidth, canvasHeight)
    canvas = Image.new(mode, size, white)        # create a canvas of appropriate width, height
    
    # add the panels to the canvas 
    pasteX = leftTextWidth
    pasteY = 0
    imgUtil.pasteImage(panelCanvas, canvas, pasteX, pasteY)
    
    # add text to rows
    # want it to be vertical. Rotate and paste the text canvas from above
    if imageLabels:    
        textV = textCanvas.rotate(90)
        imgUtil.pasteImage(textV, canvas, spacer/2, 0)
            
    return canvas
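
Below is a minimal usage sketch for createMovieFigure. It assumes an already-connected BlitzGateway `conn` and the module-level imports these examples rely on (PIL, imgUtil, figUtil, omero); the Pixels IDs, time indexes and labels are illustrative placeholders, not values from the original script.

# Usage sketch only. Assumes `conn` is a connected BlitzGateway; the Pixels
# IDs, time indexes and labels below are placeholders.
from omero.constants.projection import ProjectionType

pixelIds = [101, 102]                      # hypothetical Pixels IDs
tIndexes = [0, 5, 10]                      # T-indexes (frames) to show per row
imageLabels = [["Control"], ["Treated"]]   # one label-list per image
algorithm = ProjectionType.MAXIMUMINTENSITY

canvas = createMovieFigure(conn, pixelIds, tIndexes, zStart=0, zEnd=4,
                           width=256, height=256, spacer=12,
                           algorithm=algorithm, stepping=1, scalebar=10,
                           overlayColour=(255, 255, 255), timeUnits="SECS",
                           imageLabels=imageLabels)
canvas.save("movieFigure.png", "PNG")
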
Example #10
0
def getROImovieView(re,
                    queryService,
                    pixels,
                    timeShapeMap,
                    mergedIndexes,
                    mergedColours,
                    roiWidth,
                    roiHeight,
                    roiZoom,
                    spacer=12,
                    algorithm=None,
                    stepping=1,
                    fontsize=24,
                    maxColumns=None,
                    showRoiDuration=False):
    """
    This takes a ROI rectangle from an image and makes a movie canvas of the
    region in the ROI, zoomed by a defined factor.
    """

    mode = "RGB"
    white = (255, 255, 255)

    sizeX = pixels.getSizeX().getValue()
    sizeY = pixels.getSizeY().getValue()
    sizeZ = pixels.getSizeZ().getValue()
    sizeC = pixels.getSizeC().getValue()
    sizeT = pixels.getSizeT().getValue()

    if pixels.getPhysicalSizeX():
        physicalX = pixels.getPhysicalSizeX().getValue()
    else:
        physicalX = 0
    if pixels.getPhysicalSizeY():
        physicalY = pixels.getPhysicalSizeY().getValue()
    else:
        physicalY = 0
    log("  Pixel size (um): x: %s  y: %s" % (str(physicalX), str(physicalY)))
    log("  Image dimensions (pixels): x: %d  y: %d" % (sizeX, sizeY))
    log(" Projecting Movie Frame ROIs...")

    # set up rendering engine with the pixels
    pixelsId = pixels.getId().getValue()
    re.lookupPixels(pixelsId)
    if not re.lookupRenderingDef(pixelsId):
        re.resetDefaults()
    if not re.lookupRenderingDef(pixelsId):
        raise "Failed to lookup Rendering Def"
    re.load()

    # now get each channel in greyscale (or colour)
    # a list of renderedImages (data as Strings) for the split-view row
    renderedImages = []
    panelWidth = 0
    channelMismatch = False
    # first, turn off all channels in pixels
    for i in range(sizeC):
        re.setActive(i, False)

    # turn on channels in mergedIndexes.
    for i in mergedIndexes:
        if i >= sizeC or i < 0:
            channelMismatch = True
        else:
            print "Turning on channel:", i
            re.setActive(i, True)
            if i in mergedColours:
                rgba = mergedColours[i]
                print "Setting rgba", rgba
                re.setRGBA(i, *rgba)

    # get the combined image, using the existing rendering settings
    channelsString = ", ".join([str(i) for i in mergedIndexes])
    log("  Rendering Movie channels: %s" % channelsString)

    timeIndexes = list(timeShapeMap.keys())
    timeIndexes.sort()

    if showRoiDuration:
        log(" Timepoints shown are ROI duration, not from start of movie")
    timeLabels = figUtil.getTimeLabels(queryService, pixelsId, timeIndexes,
                                       sizeT, None, showRoiDuration)
    # The last value of the list will be the Units used to display time
    print "Time label units are:", timeLabels[-1]

    fullFirstFrame = None
    for t, timepoint in enumerate(timeIndexes):
        roiX, roiY, proStart, proEnd = timeShapeMap[timepoint]
        box = (roiX, roiY, int(roiX + roiWidth), int(roiY + roiHeight))
        log("  Time-index: %d Time-label: %s  Projecting z range: %d - %d "
            "(max Z is %d) of region x: %s y: %s" %
            (timepoint + 1, timeLabels[t], proStart + 1, proEnd + 1, sizeZ,
             roiX, roiY))

        merged = re.renderProjectedCompressed(algorithm, timepoint, stepping,
                                              proStart, proEnd)
        fullMergedImage = Image.open(StringIO.StringIO(merged))
        if fullFirstFrame is None:
            fullFirstFrame = fullMergedImage
        roiMergedImage = fullMergedImage.crop(box)
        # make sure this is not just a lazy copy of the full image
        roiMergedImage.load()
        if roiZoom != 1:
            newSize = (int(roiWidth * roiZoom), int(roiHeight * roiZoom))
            roiMergedImage = roiMergedImage.resize(newSize)
        panelWidth = roiMergedImage.size[0]
        renderedImages.append(roiMergedImage)

    if channelMismatch:
        log(" WARNING channel mismatch: The current image has fewer channels"
            " than the primary image.")

    # now assemble the roi split-view canvas, with space above for text
    colCount = len(renderedImages)
    rowCount = 1
    if maxColumns:
        rowCount = colCount / maxColumns
        if (colCount % maxColumns) > 0:
            rowCount += 1
        colCount = maxColumns
    font = imgUtil.getFont(fontsize)
    textHeight = font.getsize("Textq")[1]
    # no spaces around panels
    canvasWidth = ((panelWidth + spacer) * colCount) - spacer
    rowHeight = renderedImages[0].size[1] + spacer + textHeight
    canvasHeight = rowHeight * rowCount
    size = (canvasWidth, canvasHeight)
    # create a canvas of appropriate width, height
    canvas = Image.new(mode, size, white)

    px = 0
    textY = spacer / 2
    panelY = textHeight + spacer
    # paste the images in, with time labels
    draw = ImageDraw.Draw(canvas)

    col = 0
    for i, img in enumerate(renderedImages):
        label = timeLabels[i]
        indent = (panelWidth - (font.getsize(label)[0])) / 2
        draw.text((px + indent, textY), label, font=font, fill=(0, 0, 0))
        imgUtil.pasteImage(img, canvas, px, panelY)
        if col == (colCount - 1):
            col = 0
            px = 0
            textY += rowHeight
            panelY += rowHeight
        else:
            col += 1
            px = px + panelWidth + spacer

    # return the roi splitview canvas, as well as the full merged image
    return (canvas, fullFirstFrame, textHeight + spacer)
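
getROImovieView returns a (canvas, fullFirstFrame, topSpacer) tuple. The sketch below is modelled on how Example #18's getSplitView consumes it; the rendering engine `re`, the query service, the `pixels` object and the placeholder timeShapeMap are all assumed to be set up beforehand.

# Sketch only. `re`, `queryService` and `pixels` are assumed to be set up as
# in Example #18; timeShapeMap maps T-index -> (roiX, roiY, zStart, zEnd).
from omero.constants.projection import ProjectionType

timeShapeMap = {0: (50, 40, 0, 4), 5: (55, 42, 0, 4)}   # placeholder ROI track
roiPane, fullFirstFrame, topSpacer = getROImovieView(
    re, queryService, pixels, timeShapeMap,
    mergedIndexes=[0, 1],
    mergedColours={0: (255, 0, 0, 255), 1: (0, 255, 0, 255)},
    roiWidth=100, roiHeight=80, roiZoom=2.0, spacer=12,
    algorithm=ProjectionType.MAXIMUMINTENSITY, stepping=1,
    fontsize=24, maxColumns=5, showRoiDuration=False)
roiPane.save("roiMovieRow.png", "PNG")
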
Example #12
0
def paintDatasetCanvas(conn,
                       images,
                       title,
                       tagIds=None,
                       showUntagged=False,
                       colCount=10,
                       length=100):
    """
        Paints and returns a canvas of thumbnails from images, laid out in a set number of columns. 
        Title and date-range of the images is printed above the thumbnails,
        to the left and right, respectively. 
        
        @param conn:        Blitz connection
        @param imageIds:    Image IDs
        @param title:       title to display at top of figure. String
        @param tagIds:      Optional to sort thumbnails by tag. [long]
        @param colCount:    Max number of columns to lay out thumbnails 
        @param length:      Length of longest side of thumbnails
    """

    mode = "RGB"
    figCanvas = None
    spacing = length / 40 + 2

    thumbnailStore = conn.createThumbnailStore()  # returns omero.api.ThumbnailStorePrx
    metadataService = conn.getMetadataService()

    if len(images) == 0:
        return None
    timestampMin = images[0].getDate()  # datetime
    timestampMax = timestampMin

    dsImageIds = []
    imagePixelMap = {}
    imageNames = {}

    # sort the images by name
    images.sort(key=lambda x: (x.getName().lower()))

    for image in images:
        imageId = image.getId()
        pixelId = image.getPrimaryPixels().getId()
        name = image.getName()
        dsImageIds.append(imageId)  # make a list of image-IDs
        imagePixelMap[imageId] = pixelId  # and a map of image-ID: pixel-ID
        imageNames[imageId] = name
        timestampMin = min(timestampMin, image.getDate())
        timestampMax = max(timestampMax, image.getDate())

    # set-up fonts
    fontsize = length / 7 + 5
    font = imgUtil.getFont(fontsize)
    textHeight = font.getsize("Textq")[1]
    topSpacer = spacing + textHeight
    leftSpacer = spacing + textHeight

    tagPanes = []
    maxWidth = 0
    totalHeight = topSpacer

    # if we have a list of tags, then sort images by tag
    if tagIds:
        log(" Sorting images by tags")
        tagNames = {}
        taggedImages = {}  # a map of tagId: list-of-image-Ids
        for tagId in tagIds:
            taggedImages[tagId] = []

        # look for images that have a tag
        types = ["ome.model.annotations.TagAnnotation"]
        annotations = metadataService.loadAnnotations("Image", dsImageIds,
                                                      types, None, None)
        #filter images by annotation...
        for imageId, tags in annotations.items():
            for tag in tags:
                tagId = tag.getId().getValue()
                # if an image has multiple tags, it will be displayed more than once
                if tagId in tagIds:
                    # add the image id to the appropriate list
                    taggedImages[tagId].append(imageId)
                    if imageId in dsImageIds:
                        # remember which we've picked already
                        dsImageIds.remove(imageId)
                    if tagId not in tagNames.keys():
                        # make a dict of tag-names
                        tagNames[tagId] = tag.getTextValue().getValue()

        # if we want to show remaining images in dataset (not picked by tag)...
        if showUntagged:
            tagIds.append("noTag")
            taggedImages["noTag"] = [untaggedId for untaggedId in dsImageIds]
            tagNames["noTag"] = "Untagged"

        # print results and convert image-id to pixel-id
        # make a canvas for each tag
        for tagId in tagIds:
            if tagId not in tagNames.keys():  # no images with this tag
                continue
            leftLabel = tagNames[tagId]
            log(" Tag: %s  (contains %d images)" %
                (leftLabel, len(taggedImages[tagId])))
            pixelIds = []
            for imageId in taggedImages[tagId]:
                log("  Name: %s  ID: %d" % (imageNames[imageId], imageId))
                pixelIds.append(imagePixelMap[imageId])
            print 'pixelIds', pixelIds
            tagCanvas = imgUtil.paintThumbnailGrid(thumbnailStore,
                                                   length,
                                                   spacing,
                                                   pixelIds,
                                                   colCount,
                                                   leftLabel=leftLabel)
            tagPanes.append(tagCanvas)
            maxWidth = max(maxWidth, tagCanvas.size[0])
            totalHeight += tagCanvas.size[1]

    else:
        leftSpacer = spacing
        pixelIds = []
        for imageId in dsImageIds:
            log("  Name: %s  ID: %d" % (imageNames[imageId], imageId))
            pixelIds.append(imagePixelMap[imageId])
        figCanvas = imgUtil.paintThumbnailGrid(thumbnailStore, length, spacing,
                                               pixelIds, colCount)
        tagPanes.append(figCanvas)
        maxWidth = max(maxWidth, figCanvas.size[0])
        totalHeight += figCanvas.size[1]

    # paste them into a single canvas
    size = (maxWidth, totalHeight)
    fullCanvas = Image.new(mode, size, WHITE)
    pX = 0
    pY = topSpacer
    for pane in tagPanes:
        imgUtil.pasteImage(pane, fullCanvas, pX, pY)
        pY += pane.size[1]

    # create dates for the image timestamps. If dates are not the same, show first - last.
    firstdate = timestampMin
    lastdate = timestampMax
    figureDate = str(firstdate)
    if firstdate != lastdate:
        figureDate = "%s - %s" % (firstdate, lastdate)

    draw = ImageDraw.Draw(fullCanvas)
    dateWidth = draw.textsize(figureDate, font=font)[0]
    titleWidth = draw.textsize(title, font=font)[0]
    dateY = spacing
    dateX = fullCanvas.size[0] - spacing - dateWidth
    draw.text((leftSpacer, dateY), title, font=font, fill=(0, 0, 0))  # title
    if (leftSpacer + titleWidth) < dateX:  # if there's enough space...
        draw.text((dateX, dateY), figureDate, font=font,
                  fill=(0, 0, 0))  # add date

    return fullCanvas
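
A minimal usage sketch, mirroring how Example #15 (makeThumbnailFigure) calls paintDatasetCanvas for each Dataset; the BlitzGateway connection `conn` and the Dataset ID are placeholders.

# Sketch only. `conn` is assumed to be a connected BlitzGateway; the Dataset
# ID is a placeholder. Mirrors the per-dataset call in Example #15.
dataset = conn.getObject("Dataset", 123)
images = list(dataset.listChildren())
canvas = paintDatasetCanvas(conn, images, dataset.getName(),
                            tagIds=None, showUntagged=False,
                            colCount=10, length=100)
if canvas is not None:          # None is returned for an empty dataset
    canvas.save("datasetThumbnails.png", "PNG")
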
Example #14
0
def makeSplitViewFigure(conn, pixelIds, zStart, zEnd, splitIndexes, channelNames, colourChannels, 
                mergedIndexes, mergedColours, mergedNames, width, height, imageLabels = None, algorithm = None, stepping = 1, 
                scalebar=None, overlayColour=(255,255,255)):

    """ This method makes a figure of a number of images, arranged in rows with each row being the split-view
    of a single image. The channels are arranged left to right, with the combined image added on the right.
    The combined image is rendered according to current settings on the server, but it's channels will be
    turned on/off according to @mergedIndexes. 
    The colour of each channel turned white if colourChannels is false or the channel is not in the merged image.
    Otherwise channel is changed to mergedColours[i]
    Text is added at the top of the figure, to display channel names above each column, and the 
    combined image may have it's various channels named in coloured text. The optional imageLabels is a list 
    of string lists for naming the images at the left of the figure (Each image may have 0 or multiple labels).
    
    The figure is returned as a PIL 'Image' 
    
    @ session    session for server access
    @ pixelIds        a list of the Ids for the pixels we want to display
    @ zStart        the start of Z-range for projection
    @ zEnd             the end of Z-range for projection
    @ splitIndexes     a list of the channel indexes to display. Same channels for each image/row
    @ channelNames         map of index:name to go above the columns for each split channel
    @ colourChannels     true if split channels are 
    @ mergedIndexes        list (or set) of channels in the merged image 
    @ mergedColours     index: colour map of channels in the merged image
    @ mergedNames        if true, label with merged panel with channel names (otherwise, label "Merged")
    @ width             the width of primary image (all images zoomed to this height)
    @ height            the height of primary image
    @ imageLabels         optional list of string lists.
    @ algorithm            for projection MAXIMUMINTENSITY or MEANINTENSITY
    @ stepping            projection increment 
    """
    
    fontsize = 12
    if width > 500:
        fontsize = 48
    elif width > 400:
        fontsize = 36
    elif width > 300:
        fontsize = 24
    elif width > 200:
        fontsize = 16
        
    spacer = (width/25) + 2
    textGap = 3        # gap between text and image panels
    leftTextWidth = 0
    textHeight = 0
    

    # get the rendered splitview, with images surrounded on all sides by spacer
    sv = getSplitView(conn, pixelIds, zStart, zEnd, splitIndexes, channelNames, colourChannels, 
            mergedIndexes, mergedColours, width, height, spacer, algorithm, stepping, scalebar, overlayColour)
    
    font = imgUtil.getFont(fontsize)
    mode = "RGB"
    white = (255, 255, 255)
    textHeight = font.getsize("Textq")[1]
    topSpacer = spacer + textHeight + textGap
    #textCanvas = Image.new(mode, (1,1), white)
    #textdraw = ImageDraw.Draw(textCanvas)
    #h = textdraw.textsize("Textq", font=font) [1]
    
    # if adding text to the left, write the text on horizontal canvas, then rotate to vertical (below)
    if imageLabels:
        # find max number of labels
        maxCount = 0 
        rowHeights = []
        for row in imageLabels:
            maxCount = max(maxCount, len(row))
        leftTextWidth = (textHeight + textGap) * maxCount
        size = (sv.size[1], leftTextWidth)    # make the canvas as wide as the panels height
        textCanvas = Image.new(mode, size, white)
        textdraw = ImageDraw.Draw(textCanvas)
        px = spacer
        imageLabels.reverse()
        for row in imageLabels:
            py = leftTextWidth - textGap # start at bottom
            for l, label in enumerate(row):
                py = py - textHeight    # find the top of this row
                w = textdraw.textsize(label, font=font) [0]
                inset = int((height - w) / 2)
                textdraw.text((px+inset, py), label, font=font, fill=(0,0,0))
                py = py - textGap    # add space between rows
            px = px + spacer + height         # spacer between each row
        
    
    topTextHeight = textHeight + textGap
    if (mergedNames):
        topTextHeight = ((textHeight) * len(mergedIndexes))
    # make a canvas big-enough to add text to the images. 
    canvasWidth = leftTextWidth + sv.size[0]
    canvasHeight = topTextHeight + sv.size[1]
    size = (canvasWidth, canvasHeight)
    canvas = Image.new(mode, size, white)        # create a canvas of appropriate width, height
    
    # add the split-view panel
    pasteX = leftTextWidth
    pasteY = topTextHeight
    imgUtil.pasteImage(sv, canvas, pasteX, pasteY)
    
    draw = ImageDraw.Draw(canvas)
    
    # add text to rows
    # want it to be vertical. Rotate and paste the text canvas from above
    if imageLabels:    
        textV = textCanvas.rotate(90)
        imgUtil.pasteImage(textV, canvas, spacer, topTextHeight)
    
    # add text to columns 
    px = spacer + leftTextWidth
    py = topTextHeight + spacer - (textHeight + textGap)    # edges of panels - rowHeight
    for index in splitIndexes:
        # calculate the position of the text, centered above the image
        w = font.getsize(channelNames[index]) [0]
        inset = int((width - w) / 2)
        # text is coloured if channel is grey AND in the merged image
        rgba = (0,0,0,255)
        if index in mergedIndexes:
            if (not colourChannels) and (index in mergedColours):
                rgba = tuple(mergedColours[index])
                if rgba == (255,255,255,255):    # if white (unreadable), needs to be black!
                    rgba = (0,0,0,255)
        draw.text((px+inset, py), channelNames[index], font=font, fill=rgba)
        px = px + width + spacer
    
    # add text for combined image
    if (mergedNames):
        mergedIndexes.reverse()
        print "Adding merged channel names..."
        for index in mergedIndexes:
            rgba = (0,0,0,255)
            if index in mergedColours:
                rgba = tuple(mergedColours[index])
                print index, channelNames[index], rgba
                if rgba == (255,255,255, 255):    # if white (unreadable), needs to be black!
                    rgba = (0,0,0,255)
            name = channelNames[index]
            combTextWidth = font.getsize(name)[0]
            inset = int((width - combTextWidth) / 2)
            draw.text((px + inset, py), name, font=font, fill=rgba)
            py = py - textHeight  
    else:
        combTextWidth = font.getsize("Merged")[0]
        inset = int((width - combTextWidth) / 2)
        px = px + inset
        draw.text((px, py), "Merged", font=font, fill=(0,0,0))
    
    return canvas
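
A minimal call sketch for makeSplitViewFigure. The connection, Pixels IDs, channel names and colours are placeholders, and the projection algorithm uses the ProjectionType enum seen in Example #16.

# Sketch only. Connection, Pixels IDs and channel maps are placeholders.
from omero.constants.projection import ProjectionType

channelNames = {0: "DAPI", 1: "GFP"}                        # hypothetical names
mergedColours = {0: (0, 0, 255, 255), 1: (0, 255, 0, 255)}  # index: RGBA
figure = makeSplitViewFigure(
    conn, pixelIds=[101], zStart=0, zEnd=4,
    splitIndexes=[0, 1], channelNames=channelNames, colourChannels=True,
    mergedIndexes=[0, 1], mergedColours=mergedColours, mergedNames=True,
    width=256, height=256, imageLabels=[["Image 1"]],
    algorithm=ProjectionType.MAXIMUMINTENSITY, stepping=1,
    scalebar=10, overlayColour=(255, 255, 255))
figure.save("splitViewFigure.png", "PNG")
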
Example #15
0
def makeThumbnailFigure(conn, scriptParams):
    """
    Makes the figure using the parameters in @scriptParams, attaches the
    figure to the parent Project/Dataset, and returns the file-annotation ID

    @ returns       Returns the id of the originalFileLink child. (ID object,
                    not value)
    """

    log("Thumbnail figure created by OMERO")
    log("")

    message = ""

    # Get the objects (images or datasets)
    objects, logMessage = scriptUtil.getObjects(conn, scriptParams)
    message += logMessage
    if not objects:
        return None, message

    # Get parent
    parent = None
    if "Parent_ID" in scriptParams and len(scriptParams["IDs"]) > 1:
        if scriptParams["Data_Type"] == "Image":
            parent = conn.getObject("Dataset", scriptParams["Parent_ID"])
        else:
            parent = conn.getObject("Project", scriptParams["Parent_ID"])

    if parent is None:
        parent = objects[0]  # Attach figure to the first object

    parentClass = parent.OMERO_CLASS
    log("Figure will be linked to %s%s: %s"
        % (parentClass[0].lower(), parentClass[1:], parent.getName()))

    tagIds = []
    if "Tag_IDs" in scriptParams:
        tagIds = scriptParams['Tag_IDs']
    if len(tagIds) == 0:
        tagIds = None

    showUntagged = False
    if (tagIds):
        showUntagged = scriptParams["Show_Untagged_Images"]

    thumbSize = scriptParams["Thumbnail_Size"]
    maxColumns = scriptParams["Max_Columns"]

    figHeight = 0
    figWidth = 0
    dsCanvases = []

    if scriptParams["Data_Type"] == "Dataset":
        for dataset in objects:
            log("Dataset: %s     ID: %d"
                % (dataset.getName(), dataset.getId()))
            images = list(dataset.listChildren())
            dsCanvas = paintDatasetCanvas(
                conn, images, dataset.getName(), tagIds, showUntagged,
                length=thumbSize, colCount=maxColumns)
            if dsCanvas is None:
                continue
            dsCanvases.append(dsCanvas)
            figHeight += dsCanvas.size[1]
            figWidth = max(figWidth, dsCanvas.size[0])
    else:
        imageCanvas = paintDatasetCanvas(
            conn, objects, "", tagIds,
            showUntagged, length=thumbSize, colCount=maxColumns)
        dsCanvases.append(imageCanvas)
        figHeight += imageCanvas.size[1]
        figWidth = max(figWidth, imageCanvas.size[0])

    if len(dsCanvases) == 0:
        message += "No figure created"
        return None, message

    figure = Image.new("RGB", (figWidth, figHeight), WHITE)
    y = 0
    for ds in dsCanvases:
        imgUtil.pasteImage(ds, figure, 0, y)
        y += ds.size[1]

    log("")
    figLegend = "\n".join(logLines)

    format = scriptParams["Format"]
    figureName = scriptParams["Figure_Name"]
    figureName = os.path.basename(figureName)
    output = "localfile"

    if format == 'PNG':
        output = output + ".png"
        figureName = figureName + ".png"
        figure.save(output, "PNG")
        mimetype = "image/png"
    elif format == 'TIFF':
        output = output + ".tiff"
        figureName = figureName + ".tiff"
        figure.save(output, "TIFF")
        mimetype = "image/tiff"
    else:
        output = output + ".jpg"
        figureName = figureName + ".jpg"
        figure.save(output)
        mimetype = "image/jpeg"

    namespace = NSCREATED + "/omero/figure_scripts/Thumbnail_Figure"
    fileAnnotation, faMessage = scriptUtil.createLinkFileAnnotation(
        conn, output, parent, output="Thumbnail figure", mimetype=mimetype,
        ns=namespace, desc=figLegend, origFilePathAndName=figureName)
    message += faMessage

    return fileAnnotation, message
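
makeThumbnailFigure is driven entirely by the scriptParams dictionary. In the original script those values come from the omero.scripts parameter UI; the hand-built dict below is a stand-in with placeholder IDs, and it assumes the module-level helpers used by the function (scriptUtil, log, WHITE, NSCREATED) are available.

# Sketch only. In the real script these keys are filled by the omero.scripts
# UI; the Dataset ID is a placeholder.
scriptParams = {
    "Data_Type": "Dataset",
    "IDs": [123],
    "Thumbnail_Size": 96,
    "Max_Columns": 10,
    "Format": "PNG",
    "Figure_Name": "Thumbnail_Figure",
}
fileAnnotation, message = makeThumbnailFigure(conn, scriptParams)
print message          # Python 2 print, as used throughout these examples
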
Example #16
0
def getSplitView(conn, pixelIds, zStart, zEnd, splitIndexes, channelNames, colourChannels, mergedIndexes, 
        mergedColours, width=None, height=None, spacer = 12, algorithm = None, stepping = 1, scalebar = None, overlayColour=(255,255,255)):
    """ This method makes a figure of a number of images, arranged in rows with each row being the split-view
    of a single image. The channels are arranged left to right, with the combined image added on the right.
    The combined image is rendered according to current settings on the server, but its channels will be
    turned on/off according to @mergedIndexes. 
    No text labels are added to the image at this stage. 
    
    The figure is returned as a PIL 'Image' 
    
    @ conn        connection for server access
    @ pixelIds        a list of the Ids for the pixels we want to display
    @ zStart        the start of Z-range for projection
    @ zEnd             the end of Z-range for projection
    @ splitIndexes     a list of the channel indexes to display. Same channels for each image/row
    @ channelNames         the Map of index:names to go above the columns for each split channel
    @ colourChannels     if true, split channels are shown in colour (otherwise white)
    @ mergedIndexes      list or set of channels in the merged image 
    @ mergedColours     index: colour dictionary of channels in the merged image
    @ width            the size in pixels to show each panel
    @ height        the size in pixels to show each panel
    @ spacer        the gap between images and around the figure. Doubled between rows. 
    """
    
    if algorithm is None:    # omero::constants::projection::ProjectionType
        algorithm = omero.constants.projection.ProjectionType.MAXIMUMINTENSITY
    timepoint = 0
    mode = "RGB"
    white = (255, 255, 255)
    
    # create a rendering engine
    re = conn.createRenderingEngine()
    queryService = conn.getQueryService()
    
    rowPanels = []
    totalHeight = 0
    totalWidth = 0
    maxImageWidth = 0
    
    physicalSizeX = 0 
    
    log("Split View Rendering Log...")
    
    if zStart > -1 and zEnd > -1:
        alString = str(algorithm).replace("INTENSITY", " Intensity").capitalize()
        log("All images projected using '%s' projection with step size: %d  start: %d  end: %d" 
            % (alString, stepping, zStart+1, zEnd+1))
    else:
        log("Images show last-viewed Z-section")
    
    for row, pixelsId in enumerate(pixelIds):
        log("Rendering row %d" % (row+1))
        
        pixels = queryService.get("Pixels", pixelsId)
        sizeX = pixels.getSizeX().getValue()
        sizeY = pixels.getSizeY().getValue()
        sizeZ = pixels.getSizeZ().getValue()
        sizeC = pixels.getSizeC().getValue()
        
        if pixels.getPhysicalSizeX():
            physicalX = pixels.getPhysicalSizeX().getValue()
        else:
            physicalX = 0 
        if pixels.getPhysicalSizeY():
            physicalY = pixels.getPhysicalSizeY().getValue()
        else:
            physicalY = 0
        log("  Pixel size (um): x: %.3f  y: %.3f" % (physicalX, physicalY))
        if row == 0:    # set values for primary image
            physicalSizeX = physicalX
            physicalSizeY = physicalY
        else:            # compare primary image with current one
            if physicalSizeX != physicalX or physicalSizeY != physicalY:
                log(" WARNING: Images have different pixel lengths. Scales are not comparable.")
        
        log("  Image dimensions (pixels): x: %d  y: %d" % (sizeX, sizeY))
        maxImageWidth = max(maxImageWidth, sizeX)
        
        # set up rendering engine with the pixels
        re.lookupPixels(pixelsId)
        if not re.lookupRenderingDef(pixelsId):
            re.resetDefaults()
        if not re.lookupRenderingDef(pixelsId):
            raise "Failed to lookup Rendering Def"
        re.load()
        
        proStart = zStart
        proEnd = zEnd
        # make sure we're within Z range for projection. 
        if proEnd >= sizeZ:
            proEnd = sizeZ - 1
            if proStart > sizeZ:
                proStart = 0
            log(" WARNING: Current image has fewer Z-sections than the primary image.")
            
        # if we have an invalid z-range (start or end less than 0), show default Z only
        if proStart < 0 or proEnd < 0:
            proStart = re.getDefaultZ()
            proEnd = proStart
            log("  Display Z-section: %d" % (proEnd+1))
        else:
            log("  Projecting z range: %d - %d   (max Z is %d)" % (proStart+1, proEnd+1, sizeZ))
        


        # turn on channels in mergedIndexes.
        channelMismatch = False
        for i in mergedIndexes:
            if i >= sizeC:
                channelMismatch = True
            else:
                re.setActive(i, True)
                if i in mergedColours:
                    re.setRGBA(i, *mergedColours[i])

        # get the combined image, using the existing rendering settings
        channelsString = ", ".join([channelNames[i] for i in mergedIndexes])
        log("  Rendering merged channels: %s" % channelsString)
        if proStart != proEnd:
            overlay = re.renderProjectedCompressed(algorithm, timepoint, stepping, proStart, proEnd)
        else:
            planeDef = omero.romio.PlaneDef()
            planeDef.z = proStart
            planeDef.t = timepoint
            overlay = re.renderCompressed(planeDef)


        # now get each channel in greyscale (or colour)
        # a list of renderedImages (data as Strings) for the split-view row
        renderedImages = []
        # first, turn off all channels in pixels
        for i in range(sizeC): 
            re.setActive(i, False)    
    
        # for each channel in the splitview...
        for index in splitIndexes:
            if index >= sizeC:
                channelMismatch = True        # can't turn channel on - simply render black square! 
                renderedImages.append(None)
            else:
                re.setActive(index, True)                # turn channel on
                if colourChannels:                            # if split channels are coloured...
                    if index in mergedIndexes:            # and this channel is in the combined image
                        if index in mergedColours: 
                            rgba = tuple(mergedColours[index])
                            print "Setting channel to color", index, rgba
                            re.setRGBA(index, *rgba)        # set coloured 
                        else:
                            mergedColours[index] = re.getRGBA(index)
                    else:
                        re.setRGBA(index,255,255,255,255)    # otherwise set white (max alpha)
                else:
                    re.setRGBA(index,255,255,255,255)    # if not colourChannels - channels are white
                info = (index, re.getChannelWindowStart(index), re.getChannelWindowEnd(index))
                log("  Render channel: %s  start: %d  end: %d" % info)
                if proStart != proEnd:
                    renderedImg = re.renderProjectedCompressed(algorithm, timepoint, stepping, proStart, proEnd)
                else:
                    planeDef = omero.romio.PlaneDef()
                    planeDef.z = proStart
                    planeDef.t = timepoint
                    renderedImg = re.renderCompressed(planeDef)
                renderedImages.append(renderedImg)
            if index < sizeC:
                re.setActive(index, False)                # turn the channel off again!

        if channelMismatch:
            log(" WARNING channel mismatch: The current image has fewer channels than the primary image.")
        
        # make a canvas for the row of splitview images...
        imageCount = len(renderedImages) + 1     # extra image for combined image
        canvasWidth = ((width + spacer) * imageCount) + spacer
        canvasHeight = spacer + height
        size = (canvasWidth, canvasHeight)
        canvas = Image.new(mode, size, white)        # create a canvas of appropriate width, height
    
        px = spacer
        py = spacer/2
        col = 0
        # paste the images in
        for img in renderedImages:
            if img is None:
                im = Image.new(mode, (sizeX, sizeY), (0,0,0))
            else:
                im = Image.open(StringIO.StringIO(img))
            i = imgUtil.resizeImage(im, width, height)
            imgUtil.pasteImage(i, canvas, px, py)
            px = px + width + spacer
            col = col + 1
    
        # add combined image, after resizing and adding scale bar 
        i = Image.open(StringIO.StringIO(overlay))
        scaledImage = imgUtil.resizeImage(i, width, height)
        if scalebar:
            xIndent = spacer
            yIndent = xIndent
            zoom = imgUtil.getZoomFactor(i.size, width, height)     # if we've scaled to half size, zoom = 2
            sbar = float(scalebar) / zoom            # and the scale bar will be half size
            status, logMsg = figUtil.addScalebar(sbar, xIndent, yIndent, scaledImage, pixels, overlayColour)
            log(logMsg)

        imgUtil.pasteImage(scaledImage, canvas, px, py)
    
        totalWidth = max(totalWidth, canvasWidth)    # most should be same width anyway
        totalHeight = totalHeight + canvasHeight    # add together the heights of each row
        rowPanels.append(canvas)
        
    # make a figure to combine all split-view rows
    # each row has 1/2 spacer above and below the panels. Need extra 1/2 spacer top and bottom
    figureSize = (totalWidth, totalHeight+spacer)
    figureCanvas = Image.new(mode, figureSize, white)
    
    rowY = spacer/2
    for row in rowPanels:
        imgUtil.pasteImage(row, figureCanvas, 0, rowY)
        rowY = rowY + row.size[1]

    return figureCanvas
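
A minimal call sketch for this getSplitView. Passing -1 for both zStart and zEnd takes the code path above that skips projection and renders each image's default Z-section; the connection, Pixels IDs and channel settings are placeholders.

# Sketch only. With zStart = zEnd = -1 the function renders the default
# Z-section instead of projecting. All IDs and channel values are placeholders.
sv = getSplitView(conn, pixelIds=[101, 102], zStart=-1, zEnd=-1,
                  splitIndexes=[0, 1],
                  channelNames={0: "DAPI", 1: "GFP"},
                  colourChannels=False,
                  mergedIndexes=[0, 1],
                  mergedColours={0: (0, 0, 255, 255), 1: (0, 255, 0, 255)},
                  width=256, height=256, spacer=12,
                  algorithm=None,          # defaults to MAXIMUMINTENSITY above
                  stepping=1, scalebar=None,
                  overlayColour=(255, 255, 255))
sv.save("splitViewRow.png", "PNG")
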
Example #17
0
def addLeftLabels(panelCanvas, imageLabels, rowIndex, width, spacer):
    """
    Takes a canvas of panels and adds one or more labels to the left,
    with the text aligned vertically.
    NB: We are passed the set of labels for ALL image panels (as well as the
    index of the current image panel) so that we know what is the max label
    count and can give all panels the same margin on the left.

    @param panelCanvas:     PIL image - add labels to the left of this
    @param imageLabels:     A series of label lists, one per image. We only
                            add labels from one list
    @param rowIndex:        The index of the label list we're going to use
                            from imageLabels
    @param width:           Simply used for finding a suitable font size
    @param spacer:          Space between panels
    """

    # add labels to the row...
    mode = "RGB"
    white = (255, 255, 255)
    font = imgUtil.getFont(width / 12)
    textHeight = font.getsize("Sampleq")[1]
    textGap = spacer / 2
    #rowSpacing = panelCanvas.size[1]/len(pixelIds)

    # find max number of labels
    maxCount = 0
    for row in imageLabels:
        maxCount = max(maxCount, len(row))
    leftTextHeight = (textHeight + textGap) * maxCount
    # make the canvas as wide as the panels height
    leftTextWidth = panelCanvas.size[1]
    size = (leftTextWidth, leftTextHeight)
    textCanvas = Image.new(mode, size, white)
    textdraw = ImageDraw.Draw(textCanvas)

    labels = imageLabels[rowIndex]
    py = leftTextHeight - textGap  # start at bottom
    for l, label in enumerate(labels):
        py = py - textHeight  # find the top of this row
        w = textdraw.textsize(label, font=font)[0]
        inset = int((leftTextWidth - w) / 2)
        textdraw.text((inset, py), label, font=font, fill=(0, 0, 0))
        py = py - textGap  # add space between rows

    # make a canvas big-enough to add text to the images.
    canvasWidth = leftTextHeight + panelCanvas.size[0]
    # TextHeight will be width once rotated
    canvasHeight = panelCanvas.size[1]
    size = (canvasWidth, canvasHeight)
    # create a canvas of appropriate width, height
    canvas = Image.new(mode, size, white)

    # add the panels to the canvas
    pasteX = leftTextHeight
    pasteY = 0
    imgUtil.pasteImage(panelCanvas, canvas, pasteX, pasteY)

    # add text to rows
    # want it to be vertical. Rotate and paste the text canvas from above
    if imageLabels:
        textV = textCanvas.rotate(90)
        imgUtil.pasteImage(textV, canvas, spacer / 2, 0)

    return canvas
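
A small sketch of addLeftLabels on its own, mirroring the per-row call in Example #19 (canvas = addLeftLabels(canvas, imageLabels, row, width, spacer)); the row canvas here is just a blank placeholder image rather than a rendered row of panels.

# Sketch only. The row canvas is a blank placeholder; in Example #19 it is
# the canvas of rendered movie frames for one image.
from PIL import Image

width, spacer = 256, 12
rowCanvas = Image.new("RGB", (3 * (width + spacer) + spacer, width), (255, 255, 255))
imageLabels = [["Control"], ["Treated", "10 uM"]]   # one label-list per row
labelled = addLeftLabels(rowCanvas, imageLabels, rowIndex=0,
                         width=width, spacer=spacer)
labelled.save("labelledRow.png", "PNG")
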
Example #18
0
def getSplitView(conn, imageIds, pixelIds, mergedIndexes, mergedColours,
                 width, height, imageLabels, spacer, algorithm, stepping,
                 scalebar, overlayColour, roiZoom, maxColumns,
                 showRoiDuration, roiLabel):
    """
    This method makes a figure of a number of images, arranged in rows with
    each row being the split-view of a single image. The channels are arranged
    left to right, with the combined image added on the right.
    The combined image is rendered according to current settings on the
    server, but its channels will be turned on/off according to
    @mergedIndexes.

    The figure is returned as a PIL 'Image'

    @ conn              connection for server access
    @ pixelIds          a list of the Ids for the pixels we want to display
    @ mergedIndexes     list or set of channels in the merged image
    @ mergedColours     index: colour dictionary of channels in the merged
                        image
    @ width             the size in pixels to show each panel
    @ height            the size in pixels to show each panel
    @ spacer            the gap between images and around the figure. Doubled
                        between rows.
    """

    roiService = conn.getRoiService()
    re = conn.createRenderingEngine()
    queryService = conn.getQueryService()    # only needed for movie

    # establish dimensions and roiZoom for the primary image
    # getTheseValues from the server
    for iid in imageIds:
        rect = getRectangle(roiService, iid, roiLabel)
        if rect is not None:
            break

    if rect is None:
        log("Found no images with rectangle ROIs")
        return
    x, y, roiWidth, roiHeight, timeShapeMap = rect

    roiOutline = ((max(width, height)) / 200) + 1

    if roiZoom is None:
        # get the pixels for the primary image.
        pixels = queryService.get("Pixels", pixelIds[0])
        sizeY = pixels.getSizeY().getValue()

        roiZoom = float(height) / float(roiHeight)
        log("ROI zoom set by primary image is %F X" % roiZoom)
    else:
        log("ROI zoom: %F X" % roiZoom)

    textGap = spacer/3
    fontsize = 12
    if width > 500:
        fontsize = 48
    elif width > 400:
        fontsize = 36
    elif width > 300:
        fontsize = 24
    elif width > 200:
        fontsize = 16
    font = imgUtil.getFont(fontsize)
    textHeight = font.getsize("Textq")[1]
    maxCount = 0
    for row in imageLabels:
        maxCount = max(maxCount, len(row))
    leftTextWidth = (textHeight + textGap) * maxCount + spacer

    maxSplitPanelWidth = 0
    totalcanvasHeight = 0
    mergedImages = []
    roiSplitPanes = []
    topSpacers = []         # space for labels above each row

    for row, pixelsId in enumerate(pixelIds):
        log("Rendering row %d" % (row))

        # need to get the roi dimensions from the server
        imageId = imageIds[row]
        roi = getRectangle(roiService, imageId, roiLabel)
        if roi is None:
            log("No Rectangle ROI found for this image")
            del imageLabels[row]    # remove the corresponding labels
            continue
        roiX, roiY, roiWidth, roiHeight, timeShapeMap = roi

        pixels = queryService.get("Pixels", pixelsId)
        sizeX = pixels.getSizeX().getValue()
        sizeY = pixels.getSizeY().getValue()

        # work out if any additional zoom is needed (if the full-sized image
        # is different size from primary image)
        fullSize = (sizeX, sizeY)
        imageZoom = imgUtil.getZoomFactor(fullSize, width, height)
        if imageZoom != 1.0:
            log("  Scaling down the full-size image by a factor of %F"
                % imageZoom)

        log("  ROI location (top-left of first frame) x: %d  y: %d  and size"
            " width: %d  height: %d" % (roiX, roiY, roiWidth, roiHeight))
        # get the split pane and full merged image
        roiSplitPane, fullMergedImage, topSpacer = getROImovieView(
            re, queryService, pixels, timeShapeMap, mergedIndexes,
            mergedColours, roiWidth, roiHeight, roiZoom, spacer, algorithm,
            stepping, fontsize, maxColumns, showRoiDuration)

        # and now zoom the full-sized merged image, add scalebar
        mergedImage = imgUtil.resizeImage(fullMergedImage, width, height)
        if scalebar:
            xIndent = spacer
            yIndent = xIndent
            # and the scale bar will be half size
            sbar = float(scalebar) / imageZoom
            status, logMsg = figUtil.addScalebar(
                sbar, xIndent, yIndent, mergedImage, pixels, overlayColour)
            log(logMsg)

        # draw ROI onto mergedImage...
        # recalculate roi if the image has been zoomed
        x = roiX / imageZoom
        y = roiY / imageZoom
        roiX2 = (roiX + roiWidth) / imageZoom
        roiY2 = (roiY + roiHeight) / imageZoom
        drawRectangle(
            mergedImage, x, y, roiX2, roiY2, overlayColour, roiOutline)

        # note the maxWidth of zoomed panels and total height for row
        maxSplitPanelWidth = max(maxSplitPanelWidth, roiSplitPane.size[0])
        totalcanvasHeight += spacer + max(height+topSpacer,
                                          roiSplitPane.size[1])

        mergedImages.append(mergedImage)
        roiSplitPanes.append(roiSplitPane)
        topSpacers.append(topSpacer)

    # make a figure to combine all split-view rows
    # each row has 1/2 spacer above and below the panels. Need extra 1/2
    # spacer top and bottom
    canvasWidth = leftTextWidth + width + spacer + maxSplitPanelWidth + spacer
    figureSize = (canvasWidth, totalcanvasHeight + spacer)
    figureCanvas = Image.new("RGB", figureSize, (255, 255, 255))

    rowY = spacer
    for row, image in enumerate(mergedImages):
        labelCanvas = figUtil.getVerticalLabels(imageLabels[row], font,
                                                textGap)
        vOffset = (image.size[1] - labelCanvas.size[1]) / 2
        imgUtil.pasteImage(labelCanvas, figureCanvas, spacer / 2,
                           rowY+topSpacers[row] + vOffset)
        imgUtil.pasteImage(
            image, figureCanvas, leftTextWidth, rowY + topSpacers[row])
        x = leftTextWidth + width + spacer
        imgUtil.pasteImage(roiSplitPanes[row], figureCanvas, x, rowY)
        rowY = rowY + max(image.size[1] + topSpacers[row],
                          roiSplitPanes[row].size[1]) + spacer

    return figureCanvas
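
A minimal call sketch for this ROI-based getSplitView. The image and Pixels IDs, channel maps and roiLabel are placeholders; roiZoom=None lets the primary image's ROI height determine the zoom, as in the code above.

# Sketch only. IDs, channel maps and the ROI label are placeholders.
from omero.constants.projection import ProjectionType

figure = getSplitView(
    conn, imageIds=[201], pixelIds=[101],
    mergedIndexes=[0, 1],
    mergedColours={0: (255, 0, 0, 255), 1: (0, 255, 0, 255)},
    width=256, height=256, imageLabels=[["Image 1"]],
    spacer=12, algorithm=ProjectionType.MAXIMUMINTENSITY, stepping=1,
    scalebar=10, overlayColour=(255, 255, 255),
    roiZoom=None,               # zoom is then derived from the primary ROI
    maxColumns=10, showRoiDuration=False,
    roiLabel="ROI")             # passed through to getRectangle()
if figure is not None:          # None when no rectangle ROI was found
    figure.save("roiSplitFigure.png", "PNG")
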
Example #19
0
def createMovieFigure(conn, pixelIds, tIndexes, zStart, zEnd, width, height,
                      spacer, algorithm, stepping, scalebar, overlayColour,
                      timeUnits, imageLabels, maxColCount):
    """
    Makes the complete Movie figure: A canvas showing an image per row with
    multiple columns showing frames from each image/movie. Labels above each
    frame to show the time-stamp of that frame in the specified units and
    labels on the left name each image.

    @param conn             The BlitzGateway connection
    @param pixelIds         A list of the Pixel IDs for the images in the
                            figure
    @param tIndexes         A list of tIndexes to display frames from
    @param zStart           Projection Z-start
    @param zEnd             Projection Z-end
    @param width            Maximum width of panels
    @param height           Max height of panels
    @param spacer           Space between panels
    @param algorithm        Projection algorithm e.g. "MAXIMUMINTENSITY"
    @param stepping         Projection z-step
    @param scalebar         A number of microns for scale-bar
    @param overlayColour    Colour of the scale-bar as tuple (255,255,255)
    @param timeUnits        A string such as "SECS"
    @param imageLabels      A list of lists, corresponding to pixelIds, for
                            labelling each image with one or more strings.
    @param maxColCount      Max number of columns of frames per row
    """

    mode = "RGB"
    white = (255, 255, 255)

    # create a rendering engine
    re = conn.createRenderingEngine()
    queryService = conn.getQueryService()

    rowPanels = []
    totalHeight = 0
    totalWidth = 0
    maxImageWidth = 0
    physicalSizeX = 0

    for row, pixelsId in enumerate(pixelIds):
        log("Rendering row %d" % (row))

        pixels = queryService.get("Pixels", pixelsId)
        sizeX = pixels.getSizeX().getValue()
        sizeY = pixels.getSizeY().getValue()
        sizeZ = pixels.getSizeZ().getValue()
        sizeT = pixels.getSizeT().getValue()

        if pixels.getPhysicalSizeX():
            physicalX = pixels.getPhysicalSizeX().getValue()
        else:
            physicalX = 0
        if pixels.getPhysicalSizeY():
            physicalY = pixels.getPhysicalSizeY().getValue()
        else:
            physicalY = 0
        log("  Pixel size (um): x: %s  y: %s" %
            (str(physicalX), str(physicalY)))
        if row == 0:  # set values for primary image
            physicalSizeX = physicalX
            physicalSizeY = physicalY
        else:  # compare primary image with current one
            if physicalSizeX != physicalX or physicalSizeY != physicalY:
                log(" WARNING: Images have different pixel lengths. Scales"
                    " are not comparable.")

        log("  Image dimensions (pixels): x: %d  y: %d" % (sizeX, sizeY))
        maxImageWidth = max(maxImageWidth, sizeX)

        # set up rendering engine with the pixels
        re.lookupPixels(pixelsId)
        if not re.lookupRenderingDef(pixelsId):
            re.resetDefaults()
        if not re.lookupRenderingDef(pixelsId):
            raise "Failed to lookup Rendering Def"
        re.load()

        proStart = zStart
        proEnd = zEnd
        # make sure we're within Z range for projection.
        if proEnd >= sizeZ:
            proEnd = sizeZ - 1
            if proStart > sizeZ:
                proStart = 0
            log(" WARNING: Current image has fewer Z-sections than the"
                " primary image.")

        # if we have an invalid z-range (start or end less than 0), show
        # default Z only
        if proStart < 0 or proEnd < 0:
            proStart = re.getDefaultZ()
            proEnd = proStart
            log("  Display Z-section: %d" % (proEnd + 1))
        else:
            log("  Projecting z range: %d - %d   (max Z is %d)" %
                (proStart + 1, proEnd + 1, sizeZ))

        # now get each channel in greyscale (or colour)
        # a list of renderedImages (data as Strings) for the split-view row
        renderedImages = []

        for time in tIndexes:
            if time >= sizeT:
                log(" WARNING: This image does not have Time frame: %d. "
                    "(max is %d)" % (time + 1, sizeT))
            else:
                if proStart != proEnd:
                    renderedImg = re.renderProjectedCompressed(
                        algorithm, time, stepping, proStart, proEnd)
                else:
                    planeDef = omero.romio.PlaneDef()
                    planeDef.z = proStart
                    planeDef.t = time
                    renderedImg = re.renderCompressed(planeDef)
                # create images and resize, add to list
                image = Image.open(StringIO.StringIO(renderedImg))
                resizedImage = imgUtil.resizeImage(image, width, height)
                renderedImages.append(resizedImage)

        # make a canvas for the row of splitview images...
        # (will add time labels above each row)
        colCount = min(maxColCount, len(renderedImages))
        rowCount = int(math.ceil(float(len(renderedImages)) / colCount))
        font = imgUtil.getFont(width / 12)
        fontHeight = font.getsize("Textq")[1]
        canvasWidth = ((width + spacer) * colCount) + spacer
        canvasHeight = rowCount * (spacer / 2 + fontHeight + spacer + height)
        size = (canvasWidth, canvasHeight)
        # create a canvas of appropriate width, height
        canvas = Image.new(mode, size, white)

        # add text labels
        queryService = conn.getQueryService()
        textX = spacer
        textY = spacer / 4
        colIndex = 0
        timeLabels = figUtil.getTimeLabels(queryService, pixelsId, tIndexes,
                                           sizeT, timeUnits)
        for t, tIndex in enumerate(tIndexes):
            if tIndex >= sizeT:
                continue
            time = timeLabels[t]
            textW = font.getsize(time)[0]
            inset = (width - textW) / 2
            textdraw = ImageDraw.Draw(canvas)
            textdraw.text((textX + inset, textY),
                          time,
                          font=font,
                          fill=(0, 0, 0))
            textX += width + spacer
            colIndex += 1
            if colIndex >= maxColCount:
                colIndex = 0
                textX = spacer
                textY += (spacer / 2 + fontHeight + spacer + height)

        # add scale bar to last frame...
        if scalebar:
            scaledImage = renderedImages[-1]
            xIndent = spacer
            yIndent = xIndent
            # if we've scaled to half size, zoom = 2
            zoom = imgUtil.getZoomFactor(scaledImage.size, width, height)
            # and the scale bar will be half size
            sbar = float(scalebar) / zoom
            status, logMsg = figUtil.addScalebar(sbar, xIndent, yIndent,
                                                 scaledImage, pixels,
                                                 overlayColour)
            log(logMsg)

        px = spacer
        py = spacer + fontHeight
        colIndex = 0
        # paste the images in
        for i, img in enumerate(renderedImages):
            imgUtil.pasteImage(img, canvas, px, py)
            px = px + width + spacer
            colIndex += 1
            if colIndex >= maxColCount:
                colIndex = 0
                px = spacer
                py += (spacer / 2 + fontHeight + spacer + height)

        # Add labels to the left of the panel
        canvas = addLeftLabels(canvas, imageLabels, row, width, spacer)

        # most should be same width anyway
        totalWidth = max(totalWidth, canvas.size[0])
        # add together the heights of each row
        totalHeight = totalHeight + canvas.size[1]

        rowPanels.append(canvas)

    # make a figure to combine all split-view rows
    # each row has 1/2 spacer above and below the panels. Need extra 1/2
    # spacer top and bottom
    figureSize = (totalWidth, totalHeight + spacer)
    figureCanvas = Image.new(mode, figureSize, white)

    rowY = spacer / 2
    for row in rowPanels:
        imgUtil.pasteImage(row, figureCanvas, 0, rowY)
        rowY = rowY + row.size[1]

    return figureCanvas
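
A minimal usage sketch for createMovieFigure (not part of the original script). It assumes an authenticated BlitzGateway connection and valid Pixels IDs; every ID and setting below is hypothetical, and the algorithm is assumed to be an omero ProjectionType constant (as used elsewhere in these examples):

from omero.gateway import BlitzGateway
from omero.constants.projection import ProjectionType

# Hypothetical connection details
conn = BlitzGateway("username", "password", host="omero.example.org", port=4064)
conn.connect()

pixelIds = [101, 102]                     # hypothetical Pixels IDs, one row per image
tIndexes = [0, 5, 10, 15]                 # time frames to show in each row
imageLabels = [["Control"], ["Treated"]]  # one list of labels per image

fig = createMovieFigure(
    conn, pixelIds, tIndexes,
    zStart=0, zEnd=4,                     # project Z-sections 0-4
    width=256, height=256, spacer=12,
    algorithm=ProjectionType.MAXIMUMINTENSITY,
    stepping=1, scalebar=10,              # 10 micron scale-bar
    overlayColour=(255, 255, 255),
    timeUnits="SECS",
    imageLabels=imageLabels, maxColCount=4)
fig.save("movie_figure.png", "PNG")
conn.close()
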
Example #20
def getROIsplitView(re, pixels, zStart, zEnd, splitIndexes, channelNames,
                    mergedNames, colourChannels, mergedIndexes, mergedColours,
                    roiX, roiY, roiWidth, roiHeight, roiZoom, tIndex, spacer,
                    algorithm, stepping, fontsize, showTopLabels):
    """
    This takes a ROI rectangle from an image and makes a split view canvas of
    the region in the ROI, zoomed by a defined factor.

    @param    re        The OMERO rendering engine.
    """

    if algorithm is None:    # omero::constants::projection::ProjectionType
        algorithm = ProjectionType.MAXIMUMINTENSITY
    mode = "RGB"
    white = (255, 255, 255)

    sizeX = pixels.getSizeX().getValue()
    sizeY = pixels.getSizeY().getValue()
    sizeZ = pixels.getSizeZ().getValue()
    sizeC = pixels.getSizeC().getValue()

    if pixels.getPhysicalSizeX():
        physicalX = pixels.getPhysicalSizeX().getValue()
    else:
        physicalX = 0
    if pixels.getPhysicalSizeY():
        physicalY = pixels.getPhysicalSizeY().getValue()
    else:
        physicalY = 0
    log("  Pixel size (um): x: %.3f  y: %.3f" % (physicalX, physicalY))
    log("  Image dimensions (pixels): x: %d  y: %d" % (sizeX, sizeY))

    log(" Projecting ROIs...")
    proStart = zStart
    proEnd = zEnd
    # make sure we're within Z range for projection.
    if proEnd >= sizeZ:
        proEnd = sizeZ - 1
        if proStart > sizeZ:
            proStart = 0
        log(" WARNING: Current image has fewer Z-sections than the primary"
            " image projection.")
    if proStart < 0:
        proStart = 0
    log("  Projecting z range: %d - %d   (max Z is %d)"
        % (proStart+1, proEnd+1, sizeZ))
    # set up rendering engine with the pixels
    pixelsId = pixels.getId().getValue()
    re.lookupPixels(pixelsId)
    if not re.lookupRenderingDef(pixelsId):
        re.resetDefaults()
    if not re.lookupRenderingDef(pixelsId):
        raise Exception("Failed to lookup Rendering Def")
    re.load()

    # if we are missing some merged colours, get them from rendering engine.
    for index in mergedIndexes:
        if index not in mergedColours:
            color = tuple(re.getRGBA(index))
            mergedColours[index] = color
            print "Adding colour to index", color, index

    # now get each channel in greyscale (or colour)
    # a list of renderedImages (data as Strings) for the split-view row
    renderedImages = []
    panelWidth = 0
    channelMismatch = False
    # first, turn off all channels in pixels
    for i in range(sizeC):
        re.setActive(i, False)

    # for each channel in the splitview...
    box = (roiX, roiY, roiX+roiWidth, roiY+roiHeight)
    for index in splitIndexes:
        if index >= sizeC:
            # can't turn channel on - simply render black square!
            channelMismatch = True
        else:
            re.setActive(index, True)                # turn channel on
            if colourChannels:
                # if split channels are coloured...
                if index in mergedColours:
                    # and this channel is in the combined image
                    rgba = tuple(mergedColours[index])
                    re.setRGBA(index, *rgba)        # set coloured
                else:
                    re.setRGBA(index, 255, 255, 255, 255)
            else:
                # if not colourChannels - channels are white
                re.setRGBA(index, 255, 255, 255, 255)
            info = (channelNames[index], re.getChannelWindowStart(index),
                    re.getChannelWindowEnd(index))
            log("  Render channel: %s  start: %d  end: %d" % info)
            if proStart == proEnd:
                # if it's a single plane, we can render a region (region not
                # supported with projection)
                planeDef = omero.romio.PlaneDef()
                planeDef.z = long(proStart)
                planeDef.t = long(tIndex)
                regionDef = omero.romio.RegionDef()
                regionDef.x = roiX
                regionDef.y = roiY
                regionDef.width = roiWidth
                regionDef.height = roiHeight
                planeDef.region = regionDef
                rPlane = re.renderCompressed(planeDef)
                roiImage = Image.open(StringIO.StringIO(rPlane))
            else:
                projection = re.renderProjectedCompressed(
                    algorithm, tIndex, stepping, proStart, proEnd)
                fullImage = Image.open(StringIO.StringIO(projection))
                roiImage = fullImage.crop(box)
                roiImage.load()
                # (load() above makes the crop independent, so zooming the ROI
                # below does not also zoom fullImage)
            if roiZoom != 1:
                newSize = (int(roiWidth*roiZoom), int(roiHeight*roiZoom))
                roiImage = roiImage.resize(newSize, Image.ANTIALIAS)
            renderedImages.append(roiImage)
            panelWidth = roiImage.size[0]
            re.setActive(index, False)  # turn the channel off again!

    # turn on channels in mergedIndexes.
    for i in mergedIndexes:
        if i >= sizeC:
            channelMismatch = True
        else:
            re.setActive(i, True)
            if i in mergedColours:
                rgba = mergedColours[i]
                re.setRGBA(i, *rgba)

    # get the combined image, using the existing rendering settings
    channelsString = ", ".join([str(i) for i in mergedIndexes])
    log("  Rendering merged channels: %s" % channelsString)
    if proStart != proEnd:
        merged = re.renderProjectedCompressed(
            algorithm, tIndex, stepping, proStart, proEnd)
    else:
        planeDef = omero.romio.PlaneDef()
        planeDef.z = proStart
        planeDef.t = tIndex
        merged = re.renderCompressed(planeDef)
    fullMergedImage = Image.open(StringIO.StringIO(merged))
    roiMergedImage = fullMergedImage.crop(box)
    # make sure this is not just a lazy copy of the full image
    roiMergedImage.load()

    if roiZoom != 1:
        newSize = (int(roiWidth*roiZoom), int(roiHeight*roiZoom))
        roiMergedImage = roiMergedImage.resize(newSize, Image.ANTIALIAS)

    if channelMismatch:
        log(" WARNING channel mismatch: The current image has fewer channels"
            " than the primary image.")

    if panelWidth == 0:  # E.g. No split-view panels
        panelWidth = roiMergedImage.size[0]

    # now assemble the roi split-view canvas
    font = imgUtil.getFont(fontsize)
    textHeight = font.getsize("Textq")[1]
    topSpacer = 0
    if showTopLabels:
        if mergedNames:
            topSpacer = (textHeight * len(mergedIndexes)) + spacer
        else:
            topSpacer = textHeight + spacer
    imageCount = len(renderedImages) + 1     # extra image for merged image
    # no spaces around panels
    canvasWidth = ((panelWidth + spacer) * imageCount) - spacer
    canvasHeight = roiMergedImage.size[1] + topSpacer
    print "imageCount", imageCount, "canvasWidth", canvasWidth, \
        "canvasHeight", canvasHeight
    size = (canvasWidth, canvasHeight)
    # create a canvas of appropriate width, height
    canvas = Image.new(mode, size, white)

    px = 0
    textY = topSpacer - textHeight - spacer/2
    panelY = topSpacer
    # paste the split images in, with channel labels
    draw = ImageDraw.Draw(canvas)
    print "mergedColours", mergedColours
    for i, index in enumerate(splitIndexes):
        label = channelNames[index]
        indent = (panelWidth - (font.getsize(label)[0])) / 2
        # text is coloured if channel is not coloured AND in the merged image
        rgb = (0, 0, 0)
        if index in mergedColours:
            if not colourChannels:
                rgb = tuple(mergedColours[index])
                if rgb == (255, 255, 255, 255):
                    # if white (unreadable), needs to be black!
                    rgb = (0, 0, 0)
        if showTopLabels:
            draw.text((px+indent, textY), label, font=font, fill=rgb)
        if i < len(renderedImages):
            imgUtil.pasteImage(renderedImages[i], canvas, px, panelY)
        px = px + panelWidth + spacer
    # and the merged image
    if showTopLabels:
        # indent = (panelWidth - (font.getsize("Merged")[0])) / 2
        # draw.text((px+indent, textY), "Merged", font=font, fill=(0,0,0))
        if (mergedNames):
            for index in mergedIndexes:
                if index in mergedColours:
                    rgb = tuple(mergedColours[index])
                    if rgb == (255, 255, 255, 255):
                        rgb = (0, 0, 0)
                else:
                    rgb = (0, 0, 0)
                if index in channelNames:
                    name = channelNames[index]
                else:
                    name = str(index)
                combTextWidth = font.getsize(name)[0]
                inset = int((panelWidth - combTextWidth) / 2)
                draw.text((px + inset, textY), name, font=font, fill=rgb)
                textY = textY - textHeight
        else:
            combTextWidth = font.getsize("Merged")[0]
            inset = int((panelWidth - combTextWidth) / 2)
            draw.text((px + inset, textY), "Merged", font=font,
                      fill=(0, 0, 0))
    imgUtil.pasteImage(roiMergedImage, canvas, px, panelY)

    # return the roi splitview canvas, as well as the full merged image
    return (canvas, fullMergedImage, panelY)
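
The single-plane branch in getROIsplitView renders only the ROI rectangle by attaching a RegionDef to the PlaneDef (region rendering is not supported for projections, which is why the projected branch renders the whole plane and crops it afterwards). A minimal sketch of that region-rendering pattern, following the same setup steps as the function above; the connection details, Pixels ID and coordinates are hypothetical:

import StringIO

import omero.romio
from omero.gateway import BlitzGateway
from PIL import Image

# Hypothetical connection and Pixels ID
conn = BlitzGateway("username", "password", host="omero.example.org", port=4064)
conn.connect()
re = conn.createRenderingEngine()
pixelsId = 101
re.lookupPixels(pixelsId)
if not re.lookupRenderingDef(pixelsId):
    re.resetDefaults()
    re.lookupRenderingDef(pixelsId)
re.load()

# Render just a 200x150 region of plane z=0, t=0 (hypothetical coordinates)
planeDef = omero.romio.PlaneDef()
planeDef.z = 0
planeDef.t = 0
regionDef = omero.romio.RegionDef()
regionDef.x = 50
regionDef.y = 40
regionDef.width = 200
regionDef.height = 150
planeDef.region = regionDef
jpegData = re.renderCompressed(planeDef)
roiImage = Image.open(StringIO.StringIO(jpegData))
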
Example #21
def paintDatasetCanvas(conn,
                       images,
                       title,
                       tagIds=None,
                       showUntagged=False,
                       colCount=10,
                       length=100):
    """
        Paints and returns a canvas of thumbnails from images, laid out in a
        set number of columns.
        The title is printed above the thumbnails, at the top-left of the
        figure. (The date-range of the images is no longer shown; see the
        pull request linked in the code below.)

        @param conn:            Blitz connection
        @param images:          List of ImageWrapper objects
        @param title:           Title to display at top of figure. String
        @param tagIds:          Optional list of Tag IDs to group thumbnails by tag. [long]
        @param showUntagged:    If true, also show thumbnails of untagged images
        @param colCount:        Max number of columns to lay out thumbnails
        @param length:          Length of longest side of thumbnails
    """

    mode = "RGB"
    figCanvas = None
    spacing = length / 40 + 2

    thumbnailStore = conn.createThumbnailStore()
    # returns  omero.api.ThumbnailStorePrx
    metadataService = conn.getMetadataService()

    if len(images) == 0:
        return None
    timestampMin = images[0].getDate()  # datetime
    timestampMax = timestampMin

    dsImageIds = []
    imagePixelMap = {}
    imageNames = {}

    # sort the images by name
    images.sort(key=lambda x: (x.getName().lower()))

    for image in images:
        imageId = image.getId()
        pixelId = image.getPrimaryPixels().getId()
        name = image.getName()
        dsImageIds.append(imageId)  # make a list of image-IDs
        imagePixelMap[imageId] = pixelId  # and a map of image-ID: pixel-ID
        imageNames[imageId] = name
        timestampMin = min(timestampMin, image.getDate())
        timestampMax = max(timestampMax, image.getDate())

    # set-up fonts
    fontsize = length / 7 + 5
    font = imgUtil.getFont(fontsize)
    textHeight = font.getsize("Textq")[1]
    topSpacer = spacing + textHeight
    leftSpacer = spacing + textHeight

    tagPanes = []
    maxWidth = 0
    totalHeight = topSpacer

    # if we have a list of tags, then sort images by tag
    if tagIds:
        # Cast to int since List can be any type
        tagIds = [int(tagId) for tagId in tagIds]
        log(" Sorting images by tags: %s" % tagIds)
        tagNames = {}
        taggedImages = {}  # a map of tagId: list-of-image-Ids
        imgTags = {}  # a map of imgId: list-of-tagIds
        for tagId in tagIds:
            taggedImages[tagId] = []

        # look for images that have a tag
        types = ["ome.model.annotations.TagAnnotation"]
        annotations = metadataService.loadAnnotations("Image", dsImageIds,
                                                      types, None, None)
        # filter images by annotation...
        for imageId, tags in annotations.items():
            imgTagIds = []
            for tag in tags:
                tagId = tag.getId().getValue()
                # make a dict of tag-names
                tagNames[tagId] = tag.getTextValue().getValue().decode('utf8')
                print "     Tag:", tagId, tagId in tagIds
                imgTagIds.append(tagId)
            imgTags[imageId] = imgTagIds

        # get a sorted list of {'iid': iid, 'tagKey': tagKey,
        # 'tagIds':orderedTags}
        sortedThumbs = sortImagesByTag(tagIds, imgTags)

        if not showUntagged:
            sortedThumbs = [t for t in sortedThumbs if len(t['tagIds']) > 0]

        # Need to group sets of thumbnails by FIRST tag.
        toptagSets = []
        groupedPixelIds = []
        showSubsetLabels = False
        currentTagStr = None
        for i, img in enumerate(sortedThumbs):
            tagIds = img['tagIds']
            if len(tagIds) == 0:
                tagString = "Not Tagged"
            else:
                tagString = tagNames[tagIds[0]]
            if tagString == currentTagStr or currentTagStr is None:
                # only show subset labels (later) if there are more than 1
                # subset
                if (len(tagIds) > 1):
                    showSubsetLabels = True
                groupedPixelIds.append({
                    'pid': imagePixelMap[img['iid']],
                    'tagIds': tagIds
                })
            else:
                toptagSets.append({
                    'tagText': currentTagStr,
                    'pixelIds': groupedPixelIds,
                    'showSubsetLabels': showSubsetLabels
                })
                showSubsetLabels = len(tagIds) > 1
                groupedPixelIds = [{
                    'pid': imagePixelMap[img['iid']],
                    'tagIds': tagIds
                }]
            currentTagStr = tagString
        toptagSets.append({
            'tagText': currentTagStr,
            'pixelIds': groupedPixelIds,
            'showSubsetLabels': showSubsetLabels
        })

        # Find the indent we need
        maxTagNameWidth = max(
            [font.getsize(ts['tagText'])[0] for ts in toptagSets])
        if showUntagged:
            maxTagNameWidth = max(maxTagNameWidth,
                                  font.getsize("Not Tagged")[0])

        print "toptagSets", toptagSets

        tagSubPanes = []

        # make a canvas for each tag combination
        def makeTagsetCanvas(tagString, tagsetPixIds, showSubsetLabels):
            log(" Tagset: %s  (contains %d images)" %
                (tagString, len(tagsetPixIds)))
            if not showSubsetLabels:
                tagString = None
            subCanvas = imgUtil.paintThumbnailGrid(thumbnailStore,
                                                   length,
                                                   spacing,
                                                   tagsetPixIds,
                                                   colCount,
                                                   topLabel=tagString)
            tagSubPanes.append(subCanvas)

        for toptagSet in toptagSets:
            tagText = toptagSet['tagText']
            showSubsetLabels = toptagSet['showSubsetLabels']
            imageData = toptagSet['pixelIds']
            # loop through all thumbs under TAG, grouping into subsets.
            tagsetPixIds = []
            currentTagStr = None
            for i, img in enumerate(imageData):
                tag_ids = img['tagIds']
                pid = img['pid']
                tagString = ", ".join([tagNames[tid] for tid in tag_ids])
                if tagString == "":
                    tagString = "Not Tagged"
                # Keep grouping thumbs under similar tag set (if not on the
                # last loop)
                if tagString == currentTagStr or currentTagStr is None:
                    tagsetPixIds.append(pid)
                else:
                    # Process thumbs added so far
                    makeTagsetCanvas(currentTagStr, tagsetPixIds,
                                     showSubsetLabels)
                    # reset for next tagset
                    tagsetPixIds = [pid]
                currentTagStr = tagString

            makeTagsetCanvas(currentTagStr, tagsetPixIds, showSubsetLabels)

            maxWidth = max([c.size[0] for c in tagSubPanes])
            totalHeight = sum([c.size[1] for c in tagSubPanes])

            # paste them into a single canvas for each Tag

            leftSpacer = spacing + maxTagNameWidth + 2 * spacing
            # Draw vertical line to right
            size = (leftSpacer + maxWidth, totalHeight)
            tagCanvas = Image.new(mode, size, WHITE)
            pX = leftSpacer
            pY = 0
            for pane in tagSubPanes:
                imgUtil.pasteImage(pane, tagCanvas, pX, pY)
                pY += pane.size[1]
            if tagText is not None:
                draw = ImageDraw.Draw(tagCanvas)
                tt_w, tt_h = font.getsize(tagText)
                h_offset = (totalHeight - tt_h) / 2
                draw.text((spacing, h_offset),
                          tagText,
                          font=font,
                          fill=(50, 50, 50))
            # draw vertical line
            draw.line(
                (leftSpacer - spacing, 0, leftSpacer - spacing, totalHeight),
                fill=(0, 0, 0))
            tagPanes.append(tagCanvas)
            tagSubPanes = []
    else:
        leftSpacer = spacing
        pixelIds = []
        for imageId in dsImageIds:
            log("  Name: %s  ID: %d" % (imageNames[imageId], imageId))
            pixelIds.append(imagePixelMap[imageId])
        figCanvas = imgUtil.paintThumbnailGrid(thumbnailStore, length, spacing,
                                               pixelIds, colCount)
        tagPanes.append(figCanvas)

    # paste them into a single canvas
    tagsetSpacer = length / 3
    maxWidth = max([c.size[0] for c in tagPanes])
    totalHeight = totalHeight + sum(
        [c.size[1] + tagsetSpacer for c in tagPanes]) - tagsetSpacer
    size = (maxWidth, totalHeight)
    fullCanvas = Image.new(mode, size, WHITE)
    pX = 0
    pY = topSpacer
    for pane in tagPanes:
        imgUtil.pasteImage(pane, fullCanvas, pX, pY)
        pY += pane.size[1] + tagsetSpacer

    # create dates for the image timestamps. If dates are not the same, show
    # first - last.
    # firstdate = timestampMin
    # lastdate = timestampMax
    # figureDate = str(firstdate)
    # if firstdate != lastdate:
    #     figureDate = "%s - %s" % (firstdate, lastdate)

    draw = ImageDraw.Draw(fullCanvas)
    # dateWidth = draw.textsize(figureDate, font=font)[0]
    # titleWidth = draw.textsize(title, font=font)[0]
    dateY = spacing
    # dateX = fullCanvas.size[0] - spacing - dateWidth
    draw.text((leftSpacer, dateY), title, font=font, fill=(0, 0, 0))  # title
    # Don't show dates: see
    # https://github.com/openmicroscopy/openmicroscopy/pull/1002
    # if (leftSpacer+titleWidth) < dateX:
    # if there's enough space...
    #     draw.text((dateX, dateY), figureDate, font=font, fill=(0,0,0))
    # add date

    return fullCanvas
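
A minimal usage sketch for paintDatasetCanvas (not part of the original script), assuming an authenticated BlitzGateway connection `conn`; the dataset and tag IDs are hypothetical:

dataset = conn.getObject("Dataset", 123)            # hypothetical Dataset ID
images = list(dataset.listChildren())
canvas = paintDatasetCanvas(conn, images, dataset.getName(),
                            tagIds=[456, 789],      # hypothetical Tag IDs to group by
                            showUntagged=True,
                            colCount=8, length=96)
if canvas is not None:
    canvas.save("dataset_thumbnails.png", "PNG")
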
def createMovieFigure(conn, pixelIds, tIndexes, zStart, zEnd, width, height,
                      spacer, algorithm, stepping, scalebar, overlayColour,
                      timeUnits, imageLabels, maxColCount):
    """
    Makes the complete Movie figure: A canvas showing an image per row with
    multiple columns showing frames from each image/movie. Labels above each
    frame show the time-stamp of that frame in the specified units, and
    labels on the left name each image.

    @param conn             The BlitzGateway connection (server access)
    @param pixelIds         A list of the Pixel IDs for the images in the
                            figure
    @param tIndexes         A list of tIndexes to display frames from
    @param zStart           Projection Z-start
    @param zEnd             Projection Z-end
    @param width            Maximum width of panels
    @param height           Max height of panels
    @param spacer           Space between panels
    @param algorithm        Projection algorithm e.g. "MAXIMUMINTENSITY"
    @param stepping         Projection z-step
    @param scalebar         A number of microns for scale-bar
    @param overlayColour    Colour of the scale-bar as tuple (255,255,255)
    @param timeUnits        A string such as "SECS"
    @param imageLabels      A list of lists, corresponding to pixelIds, for
                            labelling each image with one or more strings.
    """

    mode = "RGB"
    white = (255, 255, 255)

    # create a rendering engine
    re = conn.createRenderingEngine()
    queryService = conn.getQueryService()

    rowPanels = []
    totalHeight = 0
    totalWidth = 0
    maxImageWidth = 0
    physicalSizeX = 0

    for row, pixelsId in enumerate(pixelIds):
        log("Rendering row %d" % (row))

        pixels = queryService.get("Pixels", pixelsId)
        sizeX = pixels.getSizeX().getValue()
        sizeY = pixels.getSizeY().getValue()
        sizeZ = pixels.getSizeZ().getValue()
        sizeT = pixels.getSizeT().getValue()

        if pixels.getPhysicalSizeX():
            physicalX = pixels.getPhysicalSizeX().getValue()
            unitsX = pixels.getPhysicalSizeX().getSymbol()
        else:
            physicalX = 0
            unitsX = ""
        if pixels.getPhysicalSizeY():
            physicalY = pixels.getPhysicalSizeY().getValue()
            unitsY = pixels.getPhysicalSizeY().getSymbol()
        else:
            physicalY = 0
            unitsY = ""
        log("  Pixel size: x: %s %s  y: %s %s"
            % (str(physicalX), unitsX, str(physicalY), unitsY))
        if row == 0:    # set values for primary image
            physicalSizeX = physicalX
            physicalSizeY = physicalY
        else:            # compare primary image with current one
            if physicalSizeX != physicalX or physicalSizeY != physicalY:
                log(" WARNING: Images have different pixel lengths. Scales"
                    " are not comparable.")

        log("  Image dimensions (pixels): x: %d  y: %d" % (sizeX, sizeY))
        maxImageWidth = max(maxImageWidth, sizeX)

        # set up rendering engine with the pixels
        re.lookupPixels(pixelsId)
        if not re.lookupRenderingDef(pixelsId):
            re.resetDefaults()
        if not re.lookupRenderingDef(pixelsId):
            raise Exception("Failed to lookup Rendering Def")
        re.load()

        proStart = zStart
        proEnd = zEnd
        # make sure we're within Z range for projection.
        if proEnd >= sizeZ:
            proEnd = sizeZ - 1
            if proStart > sizeZ:
                proStart = 0
            log(" WARNING: Current image has fewer Z-sections than the"
                " primary image.")

        # if we have an invalid z-range (start or end less than 0), show
        # default Z only
        if proStart < 0 or proEnd < 0:
            proStart = re.getDefaultZ()
            proEnd = proStart
            log("  Display Z-section: %d" % (proEnd+1))
        else:
            log("  Projecting z range: %d - %d   (max Z is %d)"
                % (proStart+1, proEnd+1, sizeZ))

        # now get each channel in greyscale (or colour)
        # a list of renderedImages (data as Strings) for the split-view row
        renderedImages = []

        for time in tIndexes:
            if time >= sizeT:
                log(" WARNING: This image does not have Time frame: %d. "
                    "(max is %d)" % (time+1, sizeT))
            else:
                if proStart != proEnd:
                    renderedImg = re.renderProjectedCompressed(
                        algorithm, time, stepping, proStart, proEnd)
                else:
                    planeDef = omero.romio.PlaneDef()
                    planeDef.z = proStart
                    planeDef.t = time
                    renderedImg = re.renderCompressed(planeDef)
                # create images and resize, add to list
                image = Image.open(StringIO.StringIO(renderedImg))
                resizedImage = imgUtil.resizeImage(image, width, height)
                renderedImages.append(resizedImage)

        # make a canvas for the row of splitview images...
        # (will add time labels above each row)
        colCount = min(maxColCount, len(renderedImages))
        rowCount = int(math.ceil(float(len(renderedImages)) / colCount))
        font = imgUtil.getFont(width/12)
        fontHeight = font.getsize("Textq")[1]
        canvasWidth = ((width + spacer) * colCount) + spacer
        canvasHeight = rowCount * (spacer/2 + fontHeight + spacer + height)
        size = (canvasWidth, canvasHeight)
        # create a canvas of appropriate width, height
        canvas = Image.new(mode, size, white)

        # add text labels
        queryService = conn.getQueryService()
        textX = spacer
        textY = spacer/4
        colIndex = 0
        timeLabels = figUtil.getTimeLabels(
            queryService, pixelsId, tIndexes, sizeT, timeUnits)
        for t, tIndex in enumerate(tIndexes):
            if tIndex >= sizeT:
                continue
            time = timeLabels[t]
            textW = font.getsize(time)[0]
            inset = (width - textW) / 2
            textdraw = ImageDraw.Draw(canvas)
            textdraw.text((textX+inset, textY), time, font=font,
                          fill=(0, 0, 0))
            textX += width + spacer
            colIndex += 1
            if colIndex >= maxColCount:
                colIndex = 0
                textX = spacer
                textY += (spacer/2 + fontHeight + spacer + height)

        # add scale bar to last frame...
        if scalebar:
            scaledImage = renderedImages[-1]
            xIndent = spacer
            yIndent = xIndent
            # if we've scaled to half size, zoom = 2
            zoom = imgUtil.getZoomFactor(scaledImage.size, width, height)
            # and the scale bar will be half size
            sbar = float(scalebar) / zoom
            status, logMsg = figUtil.addScalebar(
                sbar, xIndent, yIndent, scaledImage, pixels, overlayColour)
            log(logMsg)

        px = spacer
        py = spacer + fontHeight
        colIndex = 0
        # paste the images in
        for i, img in enumerate(renderedImages):
            imgUtil.pasteImage(img, canvas, px, py)
            px = px + width + spacer
            colIndex += 1
            if colIndex >= maxColCount:
                colIndex = 0
                px = spacer
                py += (spacer/2 + fontHeight + spacer + height)

        # Add labels to the left of the panel
        canvas = addLeftLabels(canvas, imageLabels, row, width, spacer)

        # most should be same width anyway
        totalWidth = max(totalWidth, canvas.size[0])
        # add together the heights of each row
        totalHeight = totalHeight + canvas.size[1]

        rowPanels.append(canvas)

    # make a figure to combine all split-view rows
    # each row has 1/2 spacer above and below the panels. Need extra 1/2
    # spacer top and bottom
    figureSize = (totalWidth, totalHeight+spacer)
    figureCanvas = Image.new(mode, figureSize, white)

    rowY = spacer / 2
    for row in rowPanels:
        imgUtil.pasteImage(row, figureCanvas, 0, rowY)
        rowY = rowY + row.size[1]

    return figureCanvas
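
The frame layout above advances one column at a time and wraps to a new row after maxColCount panels. The same arithmetic, pulled out into a small hypothetical helper for clarity (a sketch only, not part of the original script; note the integer division under Python 2, as in the original):

def panelOffsets(nPanels, maxColCount, width, height, spacer, fontHeight):
    """Sketch: top-left (x, y) of each panel, laid out in rows of at most
    maxColCount columns, using the same arithmetic as the loop above."""
    offsets = []
    px, py = spacer, spacer + fontHeight
    colIndex = 0
    for _ in range(nPanels):
        offsets.append((px, py))
        px += width + spacer
        colIndex += 1
        if colIndex >= maxColCount:
            colIndex = 0
            px = spacer
            py += (spacer / 2 + fontHeight + spacer + height)
    return offsets

# e.g. 5 panels in rows of 3, width=height=100, spacer=10, fontHeight=12:
# panelOffsets(5, 3, 100, 100, 10, 12)
# -> [(10, 22), (120, 22), (230, 22), (10, 149), (120, 149)]
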
Example #23
def makeThumbnailFigure(conn, scriptParams):
    """
    Makes the figure using the parameters in @scriptParams, attaches the
    figure to the parent Project/Dataset, and returns the file annotation.

    @return         Tuple of (file-annotation, message string); the annotation
                    is None if no figure could be created.
    """

    log("Thumbnail figure created by OMERO")
    log("")

    message = ""

    # Get the objects (images or datasets)
    objects, logMessage = scriptUtil.getObjects(conn, scriptParams)
    message += logMessage
    if not objects:
        return None, message

    # Get parent
    parent = None
    if "Parent_ID" in scriptParams and len(scriptParams["IDs"]) > 1:
        if scriptParams["Data_Type"] == "Image":
            parent = conn.getObject("Dataset", scriptParams["Parent_ID"])
        else:
            parent = conn.getObject("Project", scriptParams["Parent_ID"])

    if parent is None:
        parent = objects[0]  # Attach figure to the first object

    parentClass = parent.OMERO_CLASS
    log("Figure will be linked to %s%s: %s" %
        (parentClass[0].lower(), parentClass[1:], parent.getName()))

    tagIds = []
    if "Tag_IDs" in scriptParams:
        tagIds = scriptParams['Tag_IDs']
    if len(tagIds) == 0:
        tagIds = None

    showUntagged = False
    if (tagIds):
        showUntagged = scriptParams["Show_Untagged_Images"]

    thumbSize = scriptParams["Thumbnail_Size"]
    maxColumns = scriptParams["Max_Columns"]

    figHeight = 0
    figWidth = 0
    dsCanvases = []

    if scriptParams["Data_Type"] == "Dataset":
        for dataset in objects:
            log("Dataset: %s     ID: %d" %
                (dataset.getName(), dataset.getId()))
            images = list(dataset.listChildren())
            title = dataset.getName().decode('utf8')
            dsCanvas = paintDatasetCanvas(conn,
                                          images,
                                          title,
                                          tagIds,
                                          showUntagged,
                                          length=thumbSize,
                                          colCount=maxColumns)
            if dsCanvas is None:
                continue
            dsCanvases.append(dsCanvas)
            figHeight += dsCanvas.size[1]
            figWidth = max(figWidth, dsCanvas.size[0])
    else:
        imageCanvas = paintDatasetCanvas(conn,
                                         objects,
                                         "",
                                         tagIds,
                                         showUntagged,
                                         length=thumbSize,
                                         colCount=maxColumns)
        dsCanvases.append(imageCanvas)
        figHeight += imageCanvas.size[1]
        figWidth = max(figWidth, imageCanvas.size[0])

    if len(dsCanvases) == 0:
        message += "No figure created"
        return None, message

    figure = Image.new("RGB", (figWidth, figHeight), WHITE)
    y = 0
    for ds in dsCanvases:
        imgUtil.pasteImage(ds, figure, 0, y)
        y += ds.size[1]

    log("")
    figLegend = "\n".join(logLines)

    format = scriptParams["Format"]
    figureName = scriptParams["Figure_Name"]
    figureName = os.path.basename(figureName)
    output = "localfile"

    if format == 'PNG':
        output = output + ".png"
        figureName = figureName + ".png"
        figure.save(output, "PNG")
        mimetype = "image/png"
    elif format == 'TIFF':
        output = output + ".tiff"
        figureName = figureName + ".tiff"
        figure.save(output, "TIFF")
        mimetype = "image/tiff"
    else:
        output = output + ".jpg"
        figureName = figureName + ".jpg"
        figure.save(output)
        mimetype = "image/jpeg"

    namespace = NSCREATED + "/omero/figure_scripts/Thumbnail_Figure"
    fileAnnotation, faMessage = scriptUtil.createLinkFileAnnotation(
        conn,
        output,
        parent,
        output="Thumbnail figure",
        mimetype=mimetype,
        ns=namespace,
        desc=figLegend,
        origFilePathAndName=figureName)
    message += faMessage

    return fileAnnotation, message
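
The format handling above repeats the extension / PIL format / MIME-type choice in each branch. The same mapping expressed as a lookup table (a sketch only, not part of the original script; JPEG remains the fallback, matching the original "else" branch):

FORMAT_MAP = {
    "PNG":  (".png",  "PNG",  "image/png"),
    "TIFF": (".tiff", "TIFF", "image/tiff"),
}

def saveFigure(figure, baseName, figFormat):
    """Sketch: save a PIL figure under baseName with the extension and
    PIL format implied by figFormat, returning (path, mimetype)."""
    ext, pilFormat, mimetype = FORMAT_MAP.get(figFormat,
                                              (".jpg", "JPEG", "image/jpeg"))
    output = baseName + ext
    figure.save(output, pilFormat)
    return output, mimetype

# e.g. saveFigure(figure, "localfile", "TIFF") -> ("localfile.tiff", "image/tiff")
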
def getSplitView(session, imageIds, pixelIds, splitIndexes, channelNames, mergedNames, colourChannels, mergedIndexes, 
        mergedColours, width, height, imageLabels, spacer, algorithm, stepping, scalebar, 
        overlayColour, roiZoom, roiLabel):
    """ This method makes a figure of a number of images, arranged in rows with each row being the split-view
    of an ROI region from a single image. The channels are arranged left to right, with the combined image
    added on the right. The combined image is rendered according to current settings on the server, but its
    channels will be turned on/off according to @mergedIndexes. The Z-range and T-index for each row are
    taken from the rectangle ROI found on that image (identified by @roiLabel).

    The figure is returned as a PIL 'Image'

    @ session           session for server access
    @ imageIds          a list of image IDs, used to look up the named ROI on each image
    @ pixelIds          a list of the Ids for the pixels we want to display
    @ splitIndexes      a list of the channel indexes to display. Same channels for each image/row
    @ channelNames      the map of index:name for all channels
    @ mergedNames       if true, label the merged panel with channel names (otherwise "Merged")
    @ colourChannels    if true, split channels are shown in colour (otherwise greyscale)
    @ mergedIndexes     list or set of channels in the merged image
    @ mergedColours     index: colour dictionary of channels in the merged image
    @ width             the size in pixels to show each panel
    @ height            the size in pixels to show each panel
    @ imageLabels       a list of string lists, one list of labels per image row
    @ spacer            the gap between images and around the figure. Doubled between rows.
    @ algorithm         projection algorithm (ProjectionType) used when the ROI spans several Z-sections
    @ stepping          projection Z-step
    @ scalebar          scale-bar length in microns (or None for no scale-bar)
    @ overlayColour     colour of the scale-bar and ROI outline, as an (r, g, b) tuple
    @ roiZoom           zoom factor for the ROI panels; calculated from the primary image if None
    @ roiLabel          the text label used to find the rectangle ROI on each image
    """
    
    roiService = session.getRoiService()
    re = session.createRenderingEngine()
    queryService = session.getQueryService()    # used to look up Pixels objects
    
    # establish dimensions and roiZoom for the primary image
    # getTheseValues from the server
    rect = getRectangle(roiService, imageIds[0], roiLabel)
    if rect is None:
        raise Exception("No ROI found for the first image.")
    roiX, roiY, roiWidth, roiHeight, yMin, yMax, tMin, tMax = rect
    
    roiOutline = ((max(width, height)) / 200 ) + 1
    
    if roiZoom is None:
        # get the pixels for the primary image
        pixels = queryService.get("Pixels", pixelIds[0])
        sizeY = pixels.getSizeY().getValue()
    
        roiZoom = float(height) / float(roiHeight)
        log("ROI zoom set by primary image is %F X" % roiZoom)
    else:
        log("ROI zoom: %F X" % roiZoom)
    
    textGap = spacer/3
    fontsize = 12
    if width > 500:
        fontsize = 48
    elif width > 400:
        fontsize = 36
    elif width > 300:
        fontsize = 24
    elif width > 200:
        fontsize = 16
    font = imgUtil.getFont(fontsize)
    textHeight = font.getsize("Textq")[1]
    maxCount = 0
    for row in imageLabels:
        maxCount = max(maxCount, len(row))
    leftTextWidth = (textHeight + textGap) * maxCount + spacer
    
    maxSplitPanelWidth = 0
    totalcanvasHeight = 0
    mergedImages = []
    roiSplitPanes = []
    topSpacers = []         # space for labels above each row
    
    showLabelsAboveEveryRow = False
    invalidImages = []      # note any image row indexes that don't have ROIs. 
    
    for row, pixelsId in enumerate(pixelIds):
        log("Rendering row %d" % (row))
        
        if showLabelsAboveEveryRow:
            showTopLabels = True
        else:
            showTopLabels = (row == 0)    # only show top labels for first row
        
        # need to get the roi dimensions from the server
        imageId = imageIds[row]
        roi = getRectangle(roiService, imageId, roiLabel)
        if roi is None:
            log("No Rectangle ROI found for this image")
            invalidImages.append(row)
            continue
        roiX, roiY, roiWidth, roiHeight, zMin, zMax, tStart, tEnd = roi
        
        pixels = queryService.get("Pixels", pixelsId)
        sizeX = pixels.getSizeX().getValue()
        sizeY = pixels.getSizeY().getValue()
        
        zStart = zMin
        zEnd = zMax
        
        # work out if any additional zoom is needed (if the full-sized image is different size from primary image)
        fullSize =  (sizeX, sizeY)
        imageZoom = imgUtil.getZoomFactor(fullSize, width, height)
        if imageZoom != 1.0:
            log("  Scaling down the full-size image by a factor of %F" % imageZoom)
        
        log("  ROI location (top-left) x: %d  y: %d  and size width: %d  height: %d" % (roiX, roiY, roiWidth, roiHeight))
        log("  ROI time: %d - %d   zRange: %d - %d" % (tStart+1, tEnd+1, zStart+1, zEnd+1))
        # get the split pane and full merged image
        roiSplitPane, fullMergedImage, topSpacer = getROIsplitView(
            re, pixels, zStart, zEnd, splitIndexes, channelNames,
            mergedNames, colourChannels, mergedIndexes, mergedColours,
            roiX, roiY, roiWidth, roiHeight, roiZoom, tStart, spacer,
            algorithm, stepping, fontsize, showTopLabels)
            
        
        # and now zoom the full-sized merged image, add scalebar 
        mergedImage = imgUtil.resizeImage(fullMergedImage, width, height)
        if scalebar:
            xIndent = spacer
            yIndent = xIndent
            sbar = float(scalebar) / imageZoom            # and the scale bar will be half size
            if not addScalebar(sbar, xIndent, yIndent, mergedImage, pixels, overlayColour):
                log("  Failed to add scale bar: Pixel size not defined or scale bar is too large.")
                
        # draw ROI onto mergedImage...
        # recalculate roi if the image has been zoomed
        x = roiX / imageZoom
        y = roiY / imageZoom
        roiX2 = (roiX + roiWidth) / imageZoom
        roiY2 = (roiY + roiHeight) / imageZoom
        drawRectangle(mergedImage, x, y, roiX2, roiY2, overlayColour, roiOutline)
        
        # note the maxWidth of zoomed panels and total height for row
        maxSplitPanelWidth = max(maxSplitPanelWidth, roiSplitPane.size[0])
        totalcanvasHeight += spacer + max(height+topSpacer, roiSplitPane.size[1])
        
        mergedImages.append(mergedImage)
        roiSplitPanes.append(roiSplitPane)
        topSpacers.append(topSpacer)
    
    # remove the labels for the invalid images (without ROIs)
    invalidImages.reverse()
    for row in invalidImages:
        del imageLabels[row]
        
    # make a figure to combine all split-view rows
    # each row has 1/2 spacer above and below the panels. Need extra 1/2 spacer top and bottom
    canvasWidth = leftTextWidth + width + spacer + maxSplitPanelWidth + spacer
    figureSize = (canvasWidth, totalcanvasHeight + spacer)
    figureCanvas = Image.new("RGB", figureSize, (255,255,255))
    
    rowY = spacer
    for row, image in enumerate(mergedImages):
        labelCanvas = getVerticalLabels(imageLabels[row], font, textGap)
        vOffset = (image.size[1] - labelCanvas.size[1]) / 2
        imgUtil.pasteImage(labelCanvas, figureCanvas, spacer/2, rowY+topSpacers[row]+ vOffset)
        imgUtil.pasteImage(image, figureCanvas, leftTextWidth, rowY+topSpacers[row])
        x = leftTextWidth + width + spacer
        imgUtil.pasteImage(roiSplitPanes[row], figureCanvas, x, rowY)
        rowY = rowY + max(image.size[1]+topSpacers[row], roiSplitPanes[row].size[1])+ spacer

    return figureCanvas
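
When the full-sized merged image is resized to the panel width/height, the ROI rectangle must be scaled by the same factor before it is drawn onto the panel (as in the drawRectangle call above). A compact sketch of that conversion with hypothetical sizes; the max-ratio formula below is an assumption standing in for imgUtil.getZoomFactor:

# Sketch with hypothetical sizes: map an ROI rectangle from full-image pixel
# coordinates onto a panel that has been shrunk by "imageZoom".
sizeX, sizeY = 1024, 1024        # full-sized image (hypothetical)
width, height = 512, 512         # panel size
# assumed definition; the script itself uses imgUtil.getZoomFactor()
imageZoom = max(float(sizeX) / width, float(sizeY) / height)   # 2.0 here

roiX, roiY, roiWidth, roiHeight = 300, 200, 150, 100           # hypothetical ROI
x1 = roiX / imageZoom
y1 = roiY / imageZoom
x2 = (roiX + roiWidth) / imageZoom
y2 = (roiY + roiHeight) / imageZoom
# drawRectangle(mergedImage, x1, y1, x2, y2, overlayColour, roiOutline)
print x1, y1, x2, y2    # -> 150.0 100.0 225.0 150.0
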
Example #25
def makeFrapFigure(session, commandArgs):
	"""
	Main method called to make the FRAP figure.
	Returns the file ID of the uploaded figure file, or None if the figure
	cannot be made (e.g. missing ROIs or no pre-bleach frames).
	"""
	roiService = session.getRoiService()
	queryService = session.getQueryService()
	updateService = session.getUpdateService()
	rawFileStore = session.createRawFileStore()
	rawPixelStore = session.createRawPixelsStore()
	renderingEngine = session.createRenderingEngine()
	
	imageId = commandArgs["imageId"]
	
	theC = 0
	if "theC" in commandArgs:
		theC = commandArgs["theC"]
	
	image = queryService.get("Image", imageId)
	imageName = image.getName().getValue()
	
	query_string = "select p from Pixels p join fetch p.image i join fetch p.pixelsType pt where i.id='%d'" % imageId
	pixels = queryService.findByQuery(query_string, None)
	
	#pixels = image.getPrimaryPixels()
	pixelsId = pixels.getId().getValue()
	
	
	#sizeX = pixels.getSizeX().getValue()
	#sizeY = pixels.getSizeY().getValue()
	#sizeZ = pixels.getSizeZ().getValue()
	#sizeC = pixels.getSizeC().getValue()
	#sizeT = pixels.getSizeT().getValue()
	
	bypassOriginalFile = True
	rawPixelStore.setPixelsId(pixelsId, bypassOriginalFile)

	roiLabels = ["FRAP", "Base", "Whole"]
	
	roiMap = getEllipses(roiService, imageId, roiLabels)
	
	for l in roiLabels:
		if l not in roiMap.keys():
			print "ROI: '%s' not found. Cannot calculate FRAP" % l
			return
			
			
	frapMap = roiMap["FRAP"]
	baseMap = roiMap["Base"]
	wholeMap = roiMap["Whole"]
	
	# make a list of the t indexes that have all 3 of the Shapes we need. 
	# and a list of the roiShapes for easy access.
	tIndexes = []
	frapROI = []
	baseROI = []
	wholeROI = []
	for t in frapMap.keys():
		if t in baseMap.keys() and t in wholeMap.keys():
			tIndexes.append(t)	
			frapROI.append(frapMap[t])	
			baseROI.append(baseMap[t])	
			wholeROI.append(wholeMap[t])
	tIndexes.sort()
	
	log("T Indexes, " + ",".join([str(t) for t in tIndexes]))
	
	# get the actual plane times. 
	timeMap = figUtil.getTimes(queryService, pixelsId, tIndexes, theZ=0, theC=0)
	timeList = []
	for t in tIndexes:
		if t in timeMap:	
			timeList.append(timeMap[t])
		else:	# handles images which don't have PlaneInfo
			timeMap[t] = t
			timeList.append(t)
			
	log("Plane times (secs), " + ",".join([str(t) for t in timeList]))
	
	# lists of averageIntensity for the 3 ROIs 
	frapValues = []
	baseValues = []
	wholeValues = []
	
	frapBleach = None
	
	theZ = 0
	for i, t in enumerate(tIndexes):
		shapes = [frapROI[i], baseROI[i], wholeROI[i]]
		theZ = frapROI[i][4]	# get theZ from the FRAP ROI
		# get a list of the average values of pixels in the three shapes. 
		averages = analyseEllipses(shapes, pixels, rawPixelStore, theC, t, theZ)
		if frapBleach is None:
			frapBleach = averages[0]
		else:
			frapBleach = min(frapBleach, averages[0])
		frapValues.append(averages[0])
		baseValues.append(averages[1])
		wholeValues.append(averages[2])

	log("FRAP Values, " + ",".join([str(v) for v in frapValues]))
	log("Base Values, " + ",".join([str(v) for v in baseValues]))
	log("Whole Values, " + ",".join([str(v) for v in wholeValues]))
	
	# find the time of the bleach event (lowest intensity )
	tBleach = frapValues.index(frapBleach)
	log("Pre-bleach frames, %d" % tBleach)
	if tBleach == 0:
		print "No pre-bleach images. Can't calculate FRAP"
		return
		
	# using frames before and after tBleach - calculate bleach ranges etc. 
	frapPre = average(frapValues[:tBleach]) - average(baseValues[:tBleach])
	wholePre = average(wholeValues[:tBleach]) - average(baseValues[:tBleach])
	wholePost = average(wholeValues[tBleach:]) - average(baseValues[tBleach:])

	# use these values to get a ratio of FRAP intensity / pre-Bleach intensity * (corrected by intensity of 'Whole' ROI)
	frapNormCorr = []
	for i in range(len(tIndexes)):
		frapNormCorr.append( (float(frapValues[i] - baseValues[i]) / frapPre) * (wholePre / float(wholeValues[i] - baseValues[i])) )
	
	log("FRAP Corrected, " + ",".join([str(v) for v in frapNormCorr]))
	
	# work out the range of recovery (bleach -> plateau) and the time to reach half of this after bleach. 
	frapBleachNormCorr = frapNormCorr[tBleach]
	plateauNormCorr = average(frapNormCorr[-5:])
	plateauMinusBleachNormCorr = plateauNormCorr - frapBleachNormCorr
	mobileFraction = plateauMinusBleachNormCorr / float(1 - frapBleachNormCorr)
	immobileFraction = 1 - mobileFraction
	halfMaxNormCorr = plateauMinusBleachNormCorr /2 + frapBleachNormCorr
	
	log("Corrected Bleach Intensity, %f" % frapBleachNormCorr)
	log("Corrected Plateau Intensity, %f" % plateauNormCorr)
	log("Plateau - Bleach, %f" % plateauMinusBleachNormCorr)
	log("Mobile Fraction, %f" % mobileFraction)
	log("Immobile Fraction, %f" % immobileFraction)
	log("Half Recovered Intensity, %f" % halfMaxNormCorr)

	# Define the T-half for this FRAP. In place of fitting an exact curve to the
	# data, find the two time-points that the half Max of recovery sits between
	# and find the T-half using a linear approximation between these two points.
	# The T-half is this solved for halfMaxNormCorr - timestamp(tBleach)
	# find the first frame (by list position) after the bleach whose corrected
	# intensity exceeds the half-max of recovery
	th = None
	for i in range(tBleach, len(tIndexes)):
		if halfMaxNormCorr < frapNormCorr[i]:
			th = i
			break

	y1 = frapNormCorr[th-1]
	y2 = frapNormCorr[th]

	x1 = timeList[th-1]
	x2 = timeList[th]
	m1 = (y2-y1)/(x2-x1)    # gradient of the line segment
	c1 = y1 - m1*x1         # y-intercept
	tHalf = (halfMaxNormCorr-c1)/m1 - timeList[tBleach]
	
	log("Bleach time, %f seconds" % timeList[tBleach])
	log("T-Half, %f seconds" % tHalf)
	
	figLegend = "\n".join(logLines)
	print figLegend
	
	# make PIL image of the last frame before FRAP
	spacer = 5
	frames = []
	ellipses = [frapROI[tBleach-1], frapROI[tBleach], frapROI[-1]]
	frames.append(getPlaneImage(renderingEngine, pixelsId, theZ, tIndexes[tBleach-1]))
	frames.append(getPlaneImage(renderingEngine, pixelsId, theZ, tIndexes[tBleach]))
	frames.append(getPlaneImage(renderingEngine, pixelsId, theZ, tIndexes[-1]))
	figW = 450
	font = imgUtil.getFont(16)
	fontH = font.getsize("FRAP")[1]
	labels = ["Pre-Bleach", "Bleach", "Recovery"]
	imgW = (figW - (2 * spacer) ) / len(frames)
	# shrink the images by width, or maintain height if shrink not needed. 
	smallImages = [imgUtil.resizeImage(img, imgW, img.size[1]) for img in frames]
	# all frames are the same size, so use the last one to work out the zoom
	zoomOut = 1 / imgUtil.getZoomFactor(frames[-1].size, imgW, frames[-1].size[1])
	figH = smallImages[0].size[1] + spacer + fontH 
	frapCanvas = Image.new("RGB", (figW, figH), (255,255,255))
	draw = ImageDraw.Draw(frapCanvas)
	y = spacer + fontH
	x = 0
	for l, img in enumerate(frames):
		label = labels[l]
		indent = (imgW - font.getsize(label)[0]) / 2
		draw.text((x+indent, 0), label, font=font, fill=(0,0,0))
		roiImage = addEllipse(smallImages[l], ellipses[l], zoomOut)
		imgUtil.pasteImage(roiImage, frapCanvas, x, y)
		x += spacer + imgW
	#frapCanvas.show()		# bug-fixing only
	fileName = imageName + ".png"
	frapCanvas.save(fileName, "PNG")
	
	format = PNG
	output = fileName
	
	# if reportLab has imported...
	if reportLab:
		# we are going to export a PDF, not a JPEG
		format = PDF
		output = imageName + ".pdf"
		
		# create a plot of curve fitted to:  y = 1 - e(It)
		# where thalf = ln 0.5 / -I
		# http://www.embl.de/eamnet/frap/html/halftime.html
		import math
		i = 1/float(tHalf) * math.log(0.5)
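		# note: i is negative (ln 0.5 < 0), so math.exp(t * i) == 0.5 ** (t / tHalf),
		# decaying from 1 at t=0 towards 0 as t grows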
		fittedPoints = []
		for t in timeList[3:]:
			print math.exp(t * i)
			f = frapBleachNormCorr + ((plateauNormCorr-frapBleachNormCorr) * (1 - math.exp(t * i)))
			fittedPoints.append(f)
		print fittedPoints
		log("Fitted: , " + str(fittedPoints))
		
		# create a plot of the FRAP data
		figHeight = 450
		figWidth = 400
		drawing = Drawing(figWidth, figHeight)
		lp = LinePlot()
		lp.x = 50
		lp.y = 50
		lp.height = 300
		lp.width = 300
		lp.data = [zip(timeList, frapNormCorr), zip(timeList[3:], fittedPoints)]
		lp.lines[0].strokeColor = colors.red
		lp.lines[0].symbol = makeMarker('Circle')
		lp.lines[1].strokeColor = colors.green
		lp.lines[1].symbol = makeMarker('Circle')
		
		drawing.add(lp)
	
		drawing.add(String(200,25, 'Time (seconds)', fontSize=12, textAnchor="middle"))
		drawing.add(String(200,figHeight-25, imageName, fontSize=12, textAnchor="middle"))
		drawing.add(String(200,figHeight-50, 'T(1/2) = %f' % tHalf, fontSize=12, textAnchor="middle"))
	
		# create an A4 canvas to make the pdf figure 
		figCanvas = canvas.Canvas(output, pagesize=A4)
		pasteX = 100
		pasteY = 75
		# add the FRAP image
		figCanvas.drawImage(fileName, pasteX-25, pasteY)
		# add the FRAP data plot
		renderPDF.draw(drawing, figCanvas, pasteX, 300, showBoundary=True)
		figCanvas.save()
	
	fileId = scriptUtil.uploadAndAttachFile(queryService, updateService, rawFileStore, image, output, format, figLegend)
	return fileId
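
The T-half above is estimated by linear interpolation between the two time-points that bracket the half-recovery intensity. The same calculation as a standalone helper (a sketch only, not part of the original script), assuming the corrected intensities and plane times are ordered as in makeFrapFigure:

def halfRecoveryTime(times, values, tBleach, halfMax):
    """Sketch: time (relative to the bleach frame) at which "values" first
    crosses "halfMax", using linear interpolation between the two
    bracketing points, as in makeFrapFigure above."""
    for i in range(tBleach + 1, len(values)):
        if values[i] >= halfMax:
            x1, x2 = times[i - 1], times[i]
            y1, y2 = values[i - 1], values[i]
            m = (y2 - y1) / float(x2 - x1)   # gradient of the segment
            c = y1 - m * x1                  # y-intercept
            return (halfMax - c) / m - times[tBleach]
    return None                              # never recovered to half-max

# e.g. halfRecoveryTime([0, 10, 20, 30], [1.0, 0.2, 0.5, 0.8], 1, 0.6)
# interpolates between (20, 0.5) and (30, 0.8) -> 23.33..., minus the bleach
# time (10) -> ~13.3 seconds
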