Code example #1
0
def createmovie_figure(conn, pixel_ids, t_indexes, z_start, z_end, width,
                       height, spacer, algorithm, stepping, scalebar,
                       overlay_colour, time_units, image_labels,
                       max_col_count):
    """
    Makes the complete Movie figure: A canvas showing an image per row with
    multiple columns showing frames from each image/movie. Labels above each
    frame show the time-stamp of that frame in the specified units and
    labels on the left name each image.

    @param conn             The OMERO session
    @param pixel_ids        A list of the Pixel IDs for the images in the
                            figure
    @param t_indexes        A list of tIndexes to display frames from
    @param z_start          Projection Z-start
    @param z_end            Projection Z-end
    @param width            Maximum width of panels
    @param height           Max height of panels
    @param spacer           Space between panels
    @param algorithm        Projection algorithm e.g. "MAXIMUMINTENSITY"
    @param stepping         Projection z-step
    @param scalebar         A number of microns for scale-bar
    @param overlay_colour   Color of the scale bar as tuple (255,255,255)
    @param time_units       A string such as "SECS"
    @param image_labels     A list of lists, corresponding to pixelIds, for
                            labelling each image with one or more strings.
    @param max_col_count    Maximum number of frame columns per image row
    @return                 The complete figure as a PIL 'Image'
    """

    mode = "RGB"
    white = (255, 255, 255)

    # create a rendering engine
    re = conn.createRenderingEngine()
    query_service = conn.getQueryService()

    row_panels = []
    total_height = 0
    total_width = 0
    max_image_width = 0
    physical_size_x = 0

    for row, pixels_id in enumerate(pixel_ids):
        log("Rendering row %d" % (row))

        pixels = query_service.get("Pixels", pixels_id)
        size_x = pixels.getSizeX().getValue()
        size_y = pixels.getSizeY().getValue()
        size_z = pixels.getSizeZ().getValue()
        size_t = pixels.getSizeT().getValue()

        if pixels.getPhysicalSizeX():
            physical_x = pixels.getPhysicalSizeX().getValue()
            units_x = pixels.getPhysicalSizeX().getSymbol()
        else:
            physical_x = 0
            units_x = ""
        if pixels.getPhysicalSizeY():
            physical_y = pixels.getPhysicalSizeY().getValue()
            units_y = pixels.getPhysicalSizeY().getSymbol()
        else:
            physical_y = 0
            units_y = ""
        log("  Pixel size: x: %s %s  y: %s %s" %
            (str(physical_x), units_x, str(physical_y), units_y))
        if row == 0:  # set values for primary image
            physical_size_x = physical_x
            physical_size_y = physical_y
        else:  # compare primary image with current one
            if physical_size_x != physical_x or physical_size_y != physical_y:
                log(" WARNING: Images have different pixel lengths. Scales"
                    " are not comparable.")

        log("  Image dimensions (pixels): x: %d  y: %d" % (size_x, size_y))
        max_image_width = max(max_image_width, size_x)

        # set up rendering engine with the pixels. If no rendering def
        # exists yet, create one with resetDefaults() and look it up again.
        re.lookupPixels(pixels_id)
        if not re.lookupRenderingDef(pixels_id):
            re.resetDefaults()
        if not re.lookupRenderingDef(pixels_id):
            # was `raise "..."`: raising a string is a TypeError in Python 3
            raise Exception("Failed to lookup Rendering Def")
        re.load()

        pro_start = z_start
        pro_end = z_end
        # make sure we're within Z range for projection.
        if pro_end >= size_z:
            pro_end = size_z - 1
            if pro_start > size_z:
                pro_start = 0
            log(" WARNING: Current image has fewer Z-sections than the"
                " primary image.")

        # if we have an invalid z-range (start or end less than 0), show
        # default Z only
        if pro_start < 0 or pro_end < 0:
            pro_start = re.getDefaultZ()
            pro_end = pro_start
            log("  Display Z-section: %d" % (pro_end + 1))
        else:
            log("  Projecting z range: %d - %d   (max Z is %d)" %
                (pro_start + 1, pro_end + 1, size_z))

        # now get each channel in greyscale (or colour)
        # a list of renderedImages (data as Strings) for the split-view row
        rendered_images = []

        for time in t_indexes:
            if time >= size_t:
                log(" WARNING: This image does not have Time frame: %d. "
                    "(max is %d)" % (time + 1, size_t))
            else:
                if pro_start != pro_end:
                    # projection across the Z range
                    rendered_img = re.renderProjectedCompressed(
                        algorithm, time, stepping, pro_start, pro_end)
                else:
                    # single Z-section: render the plane directly
                    plane_def = omero.romio.PlaneDef()
                    plane_def.z = pro_start
                    plane_def.t = time
                    rendered_img = re.renderCompressed(plane_def)
                # create images and resize, add to list
                image = Image.open(io.BytesIO(rendered_img))
                resized_image = image_utils.resize_image(image, width, height)
                rendered_images.append(resized_image)

        # make a canvas for the row of splitview images...
        # (will add time labels above each row)
        col_count = min(max_col_count, len(rendered_images))
        row_count = int(math.ceil(float(len(rendered_images)) / col_count))
        font = image_utils.get_font(width / 12)
        font_height = font.getsize("Textq")[1]
        canvas_width = ((width + spacer) * col_count) + spacer
        canvas_height = row_count * (spacer / 2 + font_height + spacer +
                                     height)
        size = (canvas_width, canvas_height)
        # create a canvas of appropriate width, height
        canvas = Image.new(mode, size, white)

        # add time-stamp labels above each frame
        # (query_service from above is reused; no need to re-fetch it)
        text_x = spacer
        text_y = spacer / 4
        col_index = 0
        time_labels = figUtil.getTimeLabels(query_service, pixels_id,
                                            t_indexes, size_t, time_units)
        for t, t_index in enumerate(t_indexes):
            if t_index >= size_t:
                continue
            time = time_labels[t]
            text_w = font.getsize(time)[0]
            inset = (width - text_w) / 2
            textdraw = ImageDraw.Draw(canvas)
            textdraw.text((text_x + inset, text_y),
                          time,
                          font=font,
                          fill=(0, 0, 0))
            text_x += width + spacer
            col_index += 1
            if col_index >= max_col_count:
                # wrap to the next row of frames
                col_index = 0
                text_x = spacer
                text_y += (spacer / 2 + font_height + spacer + height)

        # add scale bar to last frame...
        if scalebar:
            scaled_image = rendered_images[-1]
            x_indent = spacer
            y_indent = x_indent
            # if we've scaled to half size, zoom = 2
            zoom = image_utils.get_zoom_factor(scaled_image.size, width,
                                               height)
            # and the scale bar will be half size
            sbar = float(scalebar) / zoom
            status, log_msg = figUtil.addScalebar(sbar, x_indent, y_indent,
                                                  scaled_image, pixels,
                                                  overlay_colour)
            log(log_msg)

        px = spacer
        py = spacer + font_height
        col_index = 0
        # paste the images in
        for i, img in enumerate(rendered_images):
            image_utils.paste_image(img, canvas, px, py)
            px = px + width + spacer
            col_index += 1
            if col_index >= max_col_count:
                # wrap to the next row of frames
                col_index = 0
                px = spacer
                py += (spacer / 2 + font_height + spacer + height)

        # Add labels to the left of the panel
        canvas = add_left_labels(canvas, image_labels, row, width, spacer)

        # most should be same width anyway
        total_width = max(total_width, canvas.size[0])
        # add together the heights of each row
        total_height = total_height + canvas.size[1]

        row_panels.append(canvas)

    # make a figure to combine all split-view rows
    # each row has 1/2 spacer above and below the panels. Need extra 1/2
    # spacer top and bottom
    figure_size = (total_width, total_height + spacer)
    figure_canvas = Image.new(mode, figure_size, white)

    row_y = spacer / 2
    for row in row_panels:
        image_utils.paste_image(row, figure_canvas, 0, row_y)
        row_y = row_y + row.size[1]

    return figure_canvas
Code example #2
0
def make_split_view_figure(conn,
                           pixel_ids,
                           z_start,
                           z_end,
                           split_indexes,
                           channel_names,
                           colour_channels,
                           merged_indexes,
                           merged_colours,
                           merged_names,
                           width,
                           height,
                           image_labels=None,
                           algorithm=None,
                           stepping=1,
                           scalebar=None,
                           overlay_colour=(255, 255, 255)):
    """
    This method makes a figure of a number of images, arranged in rows with
    each row being the split-view of a single image. The channels are arranged
    left to right, with the combined image added on the right.
    The combined image is rendered according to current settings on the
    server, but its channels will be turned on/off according to
    @merged_indexes.
    The colour of each channel turned white if colour_channels is false or the
    channel is not in the merged image.
    Otherwise channel is changed to merged_colours[i]
    Text is added at the top of the figure, to display channel names above
    each column, and the combined image may have its various channels named
    in coloured text. The optional image_labels is a list of string lists for
    naming the images at the left of the figure (Each image may have 0 or
    multiple labels).

    The figure is returned as a PIL 'Image'

    @ conn              session for server access
    @ pixel_ids         a list of the Ids for the pixels we want to display
    @ z_start           the start of Z-range for projection
    @ z_end             the end of Z-range for projection
    @ split_indexes     a list of the channel indexes to display. Same
                        channels for each image/row
    @ channel_names     map of index:name to go above the columns for each
                        split channel
    @ colour_channels   true if split channels are coloured
    @ merged_indexes    list (or set) of channels in the merged image
    @ merged_colours    index: colour map of channels in the merged image
    @ merged_names      if true, label the merged panel with channel names
                        (otherwise, label "Merged")
    @ width             the width of primary image (all images zoomed to this
                        height)
    @ height            the height of primary image
    @ image_labels      optional list of string lists.
    @ algorithm         for projection MAXIMUMINTENSITY or MEANINTENSITY
    @ stepping          projection increment
    @ scalebar          optional number of microns for a scale-bar
    @ overlay_colour    colour of the scale bar as an (r, g, b) tuple
    """

    # pick a font size roughly proportional to the panel width
    fontsize = 12
    if width > 500:
        fontsize = 48
    elif width > 400:
        fontsize = 36
    elif width > 300:
        fontsize = 24
    elif width > 200:
        fontsize = 16

    spacer = (width / 25) + 2
    text_gap = 3  # gap between text and image panels
    left_text_width = 0
    text_height = 0

    # get the rendered splitview, with images surrounded on all sides by
    # spacer
    sv = get_split_view(conn, pixel_ids, z_start, z_end, split_indexes,
                        channel_names, colour_channels, merged_indexes,
                        merged_colours, width, height, spacer, algorithm,
                        stepping, scalebar, overlay_colour)

    font = image_utils.get_font(fontsize)
    mode = "RGB"
    white = (255, 255, 255)
    text_height = font.getsize("Textq")[1]

    # if adding text to the left, write the text on horizontal canvas, then
    # rotate to vertical (below)
    if image_labels:
        # find max number of labels
        max_count = 0
        for row in image_labels:
            max_count = max(max_count, len(row))
        left_text_width = (text_height + text_gap) * max_count
        # make the canvas as wide as the panels height
        size = (sv.size[1], left_text_width)
        text_canvas = Image.new(mode, size, white)
        textdraw = ImageDraw.Draw(text_canvas)
        px = spacer
        # iterate bottom-up; was image_labels.reverse(), which mutated the
        # caller's list as a side effect
        for row in reversed(image_labels):
            py = left_text_width - text_gap  # start at bottom
            for l, label in enumerate(row):
                py = py - text_height  # find the top of this row
                w = textdraw.textsize(label, font=font)[0]
                inset = int((height - w) / 2)
                textdraw.text((px + inset, py),
                              label,
                              font=font,
                              fill=(0, 0, 0))
                py = py - text_gap  # add space between rows
            px = px + spacer + height  # spacer between each row

    top_text_height = text_height + text_gap
    if (merged_names):
        top_text_height = ((text_height) * len(merged_indexes))
    # make a canvas big-enough to add text to the images.
    canvas_width = left_text_width + sv.size[0]
    canvas_height = top_text_height + sv.size[1]
    size = (canvas_width, canvas_height)
    # create a canvas of appropriate width, height
    canvas = Image.new(mode, size, white)

    # add the split-view panel
    paste_x = left_text_width
    paste_y = top_text_height
    image_utils.paste_image(sv, canvas, paste_x, paste_y)

    draw = ImageDraw.Draw(canvas)

    # add text to rows
    # want it to be vertical. Rotate and paste the text canvas from above
    if image_labels:
        text_v = text_canvas.rotate(90, expand=True)
        image_utils.paste_image(text_v, canvas, spacer, top_text_height)

    # add text to columns
    px = spacer + left_text_width
    # edges of panels - rowHeight
    py = top_text_height + spacer - (text_height + text_gap)
    for index in split_indexes:
        # calculate the position of the text, centered above the image
        w = font.getsize(channel_names[index])[0]
        inset = int((width - w) / 2)
        # text is coloured if channel is grey AND in the merged image
        rgba = (0, 0, 0, 255)
        if index in merged_indexes:
            if (not colour_channels) and (index in merged_colours):
                rgba = tuple(merged_colours[index])
                if rgba == (255, 255, 255, 255):  # if white (unreadable),
                    # needs to be black!
                    rgba = (0, 0, 0, 255)
        draw.text((px + inset, py), channel_names[index], font=font, fill=rgba)
        px = px + width + spacer

    # add text for combined image
    if (merged_names):
        # iterate in reverse without mutating the caller's collection
        # (merged_indexes may be a set, which has no .reverse())
        for index in reversed(list(merged_indexes)):
            rgba = (0, 0, 0, 255)
            if index in merged_colours:
                rgba = tuple(merged_colours[index])
                log("%s %s %s" % (index, channel_names[index], rgba))
                if rgba == (255, 255, 255, 255):  # if white (unreadable),
                    # needs to be black!
                    rgba = (0, 0, 0, 255)
            name = channel_names[index]
            comb_text_width = font.getsize(name)[0]
            inset = int((width - comb_text_width) / 2)
            draw.text((px + inset, py), name, font=font, fill=rgba)
            py = py - text_height
    else:
        comb_text_width = font.getsize("Merged")[0]
        inset = int((width - comb_text_width) / 2)
        px = px + inset
        draw.text((px, py), "Merged", font=font, fill=(0, 0, 0))

    return canvas
Code example #3
0
def add_left_labels(panel_canvas, image_labels, row_index, width, spacer):
    """
    Takes a canvas of panels and adds one or more labels to the left,
    with the text aligned vertically.
    NB: We are passed the set of labels for ALL image panels (as well as the
    index of the current image panel) so that we know what is the max label
    count and can give all panels the same margin on the left.

    @param panel_canvas:    PIL image - add labels to the left of this
    @param image_labels:    A series of label lists, one per image. We only
                            add labels from one list
    @param row_index:       The index of the label list we're going to use
                            from image_labels
    @param width:           Simply used for finding a suitable font size
    @param spacer:          Space between panels
    @return:                A new, wider PIL image: the labels (rotated to
                            vertical) pasted to the left of panel_canvas
    """

    # add labels to row...
    mode = "RGB"
    white = (255, 255, 255)
    font = image_utils.get_font(width / 12)
    text_height = font.getsize("Sampleq")[1]
    # NOTE(review): `spacer / 2` relies on integer division — this code
    # appears to target Python 2 (see `long()` elsewhere in the file);
    # under Python 3 a float here would break Image.new() below. Confirm.
    text_gap = spacer / 2

    # find max number of labels across ALL rows, so every panel gets the
    # same left margin regardless of its own label count
    max_count = 0
    for row in image_labels:
        max_count = max(max_count, len(row))
    left_text_height = (text_height + text_gap) * max_count
    # make the canvas as wide as the panels height
    left_text_width = panel_canvas.size[1]
    size = (left_text_width, left_text_height)
    text_canvas = Image.new(mode, size, white)
    textdraw = ImageDraw.Draw(text_canvas)

    # draw this row's labels horizontally, bottom-up; the canvas is rotated
    # to vertical before pasting (below)
    labels = image_labels[row_index]
    py = left_text_height - text_gap  # start at bottom
    for l, label in enumerate(labels):
        py = py - text_height  # find the top of this row
        w = textdraw.textsize(label, font=font)[0]
        inset = int((left_text_width - w) / 2)  # centre label horizontally
        textdraw.text((inset, py), label, font=font, fill=(0, 0, 0))
        py = py - text_gap  # add space between rows

    # make a canvas big-enough to add text to the images.
    canvas_width = left_text_height + panel_canvas.size[0]
    # TextHeight will be width once rotated
    canvas_height = panel_canvas.size[1]
    size = (canvas_width, canvas_height)
    # create a canvas of appropriate width, height
    canvas = Image.new(mode, size, white)

    # add the panels to the canvas, offset right to leave room for labels
    paste_x = left_text_height
    paste_y = 0
    image_utils.paste_image(panel_canvas, canvas, paste_x, paste_y)

    # add text to rows
    # want it to be vertical. Rotate and paste the text canvas from above
    if image_labels:
        text_v = text_canvas.rotate(90, expand=True)
        image_utils.paste_image(text_v, canvas, spacer / 2, 0)

    return canvas
Code example #4
0
def get_roi_split_view(re, pixels, z_start, z_end, split_indexes,
                       channel_names, merged_names, colour_channels,
                       merged_indexes, merged_colours, roi_x, roi_y, roi_width,
                       roi_height, roi_zoom, t_index, spacer, algorithm,
                       stepping, fontsize, show_top_labels):
    """
    This takes a ROI rectangle from an image and makes a split view canvas of
    the region in the ROI, zoomed by a defined factor.

    @param re               The OMERO rendering engine.
    @param pixels           The Pixels object (sizes and physical sizes are
                            read from it)
    @param z_start          Projection Z-start
    @param z_end            Projection Z-end
    @param split_indexes    Channel indexes to render as split panels
    @param channel_names    Map of index:name used for the panel labels
    @param merged_names     If true, label merged panel with channel names
                            (otherwise "Merged")
    @param colour_channels  If true, split channels use their merged colour
    @param merged_indexes   Channels turned on in the merged image
    @param merged_colours   index: RGBA colour map. NB: mutated in place -
                            missing entries are filled from the rendering
                            engine
    @param roi_x            ROI rectangle x (pixels)
    @param roi_y            ROI rectangle y (pixels)
    @param roi_width        ROI rectangle width (pixels)
    @param roi_height       ROI rectangle height (pixels)
    @param roi_zoom         Zoom factor applied to the cropped ROI
    @param t_index          The time point to render
    @param spacer           Gap between panels
    @param algorithm        Projection type (defaults to MAXIMUMINTENSITY)
    @param stepping         Projection z-step
    @param fontsize         Font size for the labels
    @param show_top_labels  Whether to draw channel labels above the panels
    @return                 Tuple of (canvas, full_merged_image, panel_y)
    """

    if algorithm is None:  # omero::constants::projection::ProjectionType
        algorithm = ProjectionType.MAXIMUMINTENSITY
    mode = "RGB"
    white = (255, 255, 255)

    size_x = pixels.getSizeX().getValue()
    size_y = pixels.getSizeY().getValue()
    size_z = pixels.getSizeZ().getValue()
    size_c = pixels.getSizeC().getValue()

    if pixels.getPhysicalSizeX():
        physical_x = pixels.getPhysicalSizeX().getValue()
    else:
        physical_x = 0
    if pixels.getPhysicalSizeY():
        physical_y = pixels.getPhysicalSizeY().getValue()
    else:
        physical_y = 0
    log("  Pixel size (um): x: %.3f  y: %.3f" % (physical_x, physical_y))
    log("  Image dimensions (pixels): x: %d  y: %d" % (size_x, size_y))

    log(" Projecting ROIs...")
    pro_start = z_start
    pro_end = z_end
    # make sure we're within Z range for projection.
    if pro_end >= size_z:
        pro_end = size_z - 1
        if pro_start > size_z:
            pro_start = 0
        log(" WARNING: Current image has fewer Z-sections than the primary"
            " image projection.")
    if pro_start < 0:
        pro_start = 0
    log("  Projecting z range: %d - %d   (max Z is %d)" %
        (pro_start + 1, pro_end + 1, size_z))
    # set up rendering engine with the pixels. If no rendering def exists
    # yet, create one with resetDefaults() and look it up again.
    pixels_id = pixels.getId().getValue()
    re.lookupPixels(pixels_id)
    if not re.lookupRenderingDef(pixels_id):
        re.resetDefaults()
    if not re.lookupRenderingDef(pixels_id):
        # was `raise "..."`: raising a string is a TypeError in Python 3
        raise Exception("Failed to lookup Rendering Def")
    re.load()

    # if we are missing some merged colours, get them from rendering engine.
    for index in merged_indexes:
        if index not in merged_colours:
            color = tuple(re.getRGBA(index))
            merged_colours[index] = color

    # now get each channel in greyscale (or colour)
    # a list of renderedImages (data as Strings) for the split-view row
    rendered_images = []
    panel_width = 0
    channel_mismatch = False
    # first, turn off all channels in pixels
    for i in range(size_c):
        re.setActive(i, False)

    # for each channel in the splitview...
    box = (roi_x, roi_y, roi_x + roi_width, roi_y + roi_height)
    for index in split_indexes:
        if index >= size_c:
            # can't turn channel on - simply render black square!
            channel_mismatch = True
        else:
            re.setActive(index, True)  # turn channel on
            if colour_channels:
                # if split channels are coloured...
                if index in merged_colours:
                    # and this channel is in the combined image
                    rgba = tuple(merged_colours[index])
                    re.setRGBA(index, *rgba)  # set coloured
                else:
                    re.setRGBA(index, 255, 255, 255, 255)
            else:
                # if not colourChannels - channels are white
                re.setRGBA(index, 255, 255, 255, 255)
            info = (channel_names[index], re.getChannelWindowStart(index),
                    re.getChannelWindowEnd(index))
            log("  Render channel: %s  start: %d  end: %d" % info)
            if pro_start == pro_end:
                # if it's a single plane, we can render a region (region not
                # supported with projection)
                plane_def = omero.romio.PlaneDef()
                # NOTE(review): long() is Python 2 only - confirm target
                # interpreter before porting
                plane_def.z = long(pro_start)
                plane_def.t = long(t_index)
                region_def = omero.romio.RegionDef()
                region_def.x = roi_x
                region_def.y = roi_y
                region_def.width = roi_width
                region_def.height = roi_height
                plane_def.region = region_def
                r_plane = re.renderCompressed(plane_def)
                roi_image = Image.open(io.BytesIO(r_plane))
            else:
                projection = re.renderProjectedCompressed(
                    algorithm, t_index, stepping, pro_start, pro_end)
                full_image = Image.open(io.BytesIO(projection))
                roi_image = full_image.crop(box)
                roi_image.load()
                # hoping that when we zoom, don't zoom fullImage
            # was `roi_zoom is not 1`: identity comparison with an int
            # literal is unreliable - use value comparison
            if roi_zoom != 1:
                new_size = (int(roi_width * roi_zoom),
                            int(roi_height * roi_zoom))
                roi_image = roi_image.resize(new_size, Image.ANTIALIAS)
            rendered_images.append(roi_image)
            panel_width = roi_image.size[0]
            re.setActive(index, False)  # turn the channel off again!

    # turn on channels in mergedIndexes.
    for i in merged_indexes:
        if i >= size_c:
            channel_mismatch = True
        else:
            re.setActive(i, True)
            if i in merged_colours:
                rgba = merged_colours[i]
                re.setRGBA(i, *rgba)

    # get the combined image, using the existing rendering settings
    channels_string = ", ".join([str(i) for i in merged_indexes])
    log("  Rendering merged channels: %s" % channels_string)
    if pro_start != pro_end:
        merged = re.renderProjectedCompressed(algorithm, t_index, stepping,
                                              pro_start, pro_end)
    else:
        plane_def = omero.romio.PlaneDef()
        plane_def.z = pro_start
        plane_def.t = t_index
        merged = re.renderCompressed(plane_def)
    full_merged_image = Image.open(io.BytesIO(merged))
    roi_merged_image = full_merged_image.crop(box)
    # make sure this is not just a lazy copy of the full image
    roi_merged_image.load()

    # was `roi_zoom is not 1` (see above)
    if roi_zoom != 1:
        new_size = (int(roi_width * roi_zoom), int(roi_height * roi_zoom))
        roi_merged_image = roi_merged_image.resize(new_size, Image.ANTIALIAS)

    if channel_mismatch:
        log(" WARNING channel mismatch: The current image has fewer channels"
            " than the primary image.")

    if panel_width == 0:  # e.g. No split-view panels
        panel_width = roi_merged_image.size[0]

    # now assemble the roi split-view canvas
    font = image_utils.get_font(fontsize)
    text_height = font.getsize("Textq")[1]
    top_spacer = 0
    if show_top_labels:
        if merged_names:
            top_spacer = (text_height * len(merged_indexes)) + spacer
        else:
            top_spacer = text_height + spacer
    image_count = len(rendered_images) + 1  # extra image for merged image
    # no spaces around panels
    canvas_width = ((panel_width + spacer) * image_count) - spacer
    canvas_height = roi_merged_image.size[1] + top_spacer

    size = (canvas_width, canvas_height)
    # create a canvas of appropriate width, height
    canvas = Image.new(mode, size, white)

    px = 0
    text_y = top_spacer - text_height - spacer / 2
    panel_y = top_spacer
    # paste the split images in, with channel labels
    draw = ImageDraw.Draw(canvas)
    for i, index in enumerate(split_indexes):
        label = channel_names[index]
        indent = (panel_width - (font.getsize(label)[0])) / 2
        # text is coloured if channel is not coloured AND in the merged image
        rgb = (0, 0, 0)
        if index in merged_colours:
            if not colour_channels:
                rgb = tuple(merged_colours[index])
                if rgb == (255, 255, 255, 255):
                    # if white (unreadable), needs to be black!
                    rgb = (0, 0, 0)
        if show_top_labels:
            draw.text((px + indent, text_y), label, font=font, fill=rgb)
        if i < len(rendered_images):
            image_utils.paste_image(rendered_images[i], canvas, px, panel_y)
        px = px + panel_width + spacer
    # and the merged image
    if show_top_labels:
        if (merged_names):
            for index in merged_indexes:
                if index in merged_colours:
                    rgb = tuple(merged_colours[index])
                    if rgb == (255, 255, 255, 255):
                        rgb = (0, 0, 0)
                else:
                    rgb = (0, 0, 0)
                if index in channel_names:
                    name = channel_names[index]
                else:
                    name = str(index)
                comb_text_width = font.getsize(name)[0]
                inset = int((panel_width - comb_text_width) / 2)
                draw.text((px + inset, text_y), name, font=font, fill=rgb)
                text_y = text_y - text_height
        else:
            comb_text_width = font.getsize("Merged")[0]
            inset = int((panel_width - comb_text_width) / 2)
            draw.text((px + inset, text_y),
                      "Merged",
                      font=font,
                      fill=(0, 0, 0))
    image_utils.paste_image(roi_merged_image, canvas, px, panel_y)

    # return the roi splitview canvas, as well as the full merged image
    return (canvas, full_merged_image, panel_y)
Code example #5
0
def get_split_view(conn, image_ids, pixel_ids, split_indexes, channel_names,
                   merged_names, colour_channels, merged_indexes,
                   merged_colours, width, height, image_labels, spacer,
                   algorithm, stepping, scalebar, overlay_colour, roi_zoom,
                   roi_label):
    """
    This method makes a figure of a number of images, arranged in rows with
    each row being the split-view of a single image's ROI region. The
    channels are arranged left to right, with the combined image added on
    the right. The combined image is rendered according to current settings
    on the server, but it's channels will be turned on/off according to
    @mergedIndexes.

    Rows whose image has no matching rectangle ROI are skipped, and their
    labels are removed from image_labels (modified in place).

    The figure is returned as a PIL 'Image'

    @param conn             session for server access
    @param image_ids        a list of image IDs, one per row
    @param pixel_ids        a list of the Ids for the pixels we want to
                            display
    @param split_indexes    a list of the channel indexes to display. Same
                            channels for each image/row
    @param channel_names    the Map of index:names for all channels
    @param merged_names     if true, label merged panel with channel names
    @param colour_channels  the colour to make each column/ channel
    @param merged_indexes   list or set of channels in the merged image
    @param merged_colours   index: colour dictionary of channels in the
                            merged image
    @param width            the size in pixels to show each panel
    @param height           the size in pixels to show each panel
    @param image_labels     a list of lists of label strings, one per row
    @param spacer           the gap between images and around the figure.
                            Doubled between rows.
    @param algorithm        projection algorithm e.g. "MAXIMUMINTENSITY"
    @param stepping         projection z-step
    @param scalebar         scale-bar length in microns, or None
    @param overlay_colour   colour for scale-bar and ROI outline
    @param roi_zoom         zoom factor for ROI panels. None: fit to height
    @param roi_label        only use ROIs matching this label

    @raise Exception        if the first image has no matching rectangle ROI
    @return                 the PIL Image of the complete figure
    """

    roi_service = conn.getRoiService()
    re = conn.createRenderingEngine()
    query_service = conn.getQueryService()  # only needed for movie

    # establish dimensions and roiZoom for the primary image
    # getTheseValues from the server
    rect = get_rectangle(roi_service, image_ids[0], roi_label)
    if rect is None:
        raise Exception("No ROI found for the first image.")
    roi_x, roi_y, roi_width, roi_height, y_min, y_max, t_min, t_max = rect

    # outline thickness scales with panel size; integer division so the
    # value stays an int under Python 3
    roi_outline = ((max(width, height)) // 200) + 1

    if roi_zoom is None:
        # default zoom chosen so the primary image's ROI fills panel height
        roi_zoom = float(height) / float(roi_height)
        log("ROI zoom set by primary image is %F X" % roi_zoom)
    else:
        log("ROI zoom: %F X" % roi_zoom)

    # integer division: these values feed pixel offsets and canvas sizes,
    # which PIL requires to be ints
    text_gap = spacer // 3
    fontsize = 12
    if width > 500:
        fontsize = 48
    elif width > 400:
        fontsize = 36
    elif width > 300:
        fontsize = 24
    elif width > 200:
        fontsize = 16
    font = image_utils.get_font(fontsize)
    text_height = font.getsize("Textq")[1]
    # left margin is wide enough for the longest stack of vertical labels
    max_count = 0
    for row in image_labels:
        max_count = max(max_count, len(row))
    left_text_width = (text_height + text_gap) * max_count + spacer

    max_split_panel_width = 0
    total_canvas_height = 0
    merged_images = []
    roi_split_panes = []
    top_spacers = []  # space for labels above each row

    show_labels_above_every_row = False
    invalid_images = []  # note any image row indexes that don't have ROIs.

    for row, pixels_id in enumerate(pixel_ids):
        log("Rendering row %d" % (row))

        if show_labels_above_every_row:
            show_top_labels = True
        else:
            show_top_labels = (row == 0)  # only show top labels for first row

        # need to get the roi dimensions from the server
        image_id = image_ids[row]
        roi = get_rectangle(roi_service, image_id, roi_label)
        if roi is None:
            log("No Rectangle ROI found for this image")
            invalid_images.append(row)
            continue

        roi_x, roi_y, roi_width, roi_height, z_min, z_max, t_start, t_end = roi

        pixels = query_service.get("Pixels", pixels_id)
        size_x = pixels.getSizeX().getValue()
        size_y = pixels.getSizeY().getValue()

        z_start = z_min
        z_end = z_max

        # work out if any additional zoom is needed (if the full-sized image
        # is different size from primary image)
        full_size = (size_x, size_y)
        image_zoom = image_utils.get_zoom_factor(full_size, width, height)
        if image_zoom != 1.0:
            log("  Scaling down the full-size image by a factor of %F" %
                image_zoom)

        log("  ROI location (top-left) x: %d  y: %d  and size width:"
            " %d  height: %d" % (roi_x, roi_y, roi_width, roi_height))
        log("  ROI time: %d - %d   zRange: %d - %d" %
            (t_start + 1, t_end + 1, z_start + 1, z_end + 1))
        # get the split pane and full merged image
        roi_split_pane, full_merged_image, top_spacer = get_roi_split_view(
            re, pixels, z_start, z_end, split_indexes, channel_names,
            merged_names, colour_channels, merged_indexes, merged_colours,
            roi_x, roi_y, roi_width, roi_height, roi_zoom, t_start, spacer,
            algorithm, stepping, fontsize, show_top_labels)

        # and now zoom the full-sized merged image, add scalebar
        merged_image = image_utils.resize_image(full_merged_image, width,
                                                height)
        if scalebar:
            x_indent = spacer
            y_indent = x_indent
            # and the scale bar will be half size
            sbar = float(scalebar) / image_zoom
            status, log_msg = figUtil.addScalebar(sbar, x_indent, y_indent,
                                                  merged_image, pixels,
                                                  overlay_colour)
            log(log_msg)

        # draw ROI onto mergedImage...
        # recalculate roi if the image has been zoomed
        x = roi_x / image_zoom
        y = roi_y / image_zoom
        roi_x2 = (roi_x + roi_width) / image_zoom
        roi_y2 = (roi_y + roi_height) / image_zoom
        draw_rectangle(merged_image, x, y, roi_x2, roi_y2, overlay_colour,
                       roi_outline)

        # note the maxWidth of zoomed panels and total height for row
        max_split_panel_width = max(max_split_panel_width,
                                    roi_split_pane.size[0])
        total_canvas_height += spacer + max(height + top_spacer,
                                            roi_split_pane.size[1])

        merged_images.append(merged_image)
        roi_split_panes.append(roi_split_pane)
        top_spacers.append(top_spacer)

    # remove the labels for the invalid images (without ROIs)
    # reverse so deletions don't shift the remaining indexes
    invalid_images.reverse()
    for row in invalid_images:
        del image_labels[row]

    # make a figure to combine all split-view rows
    # each row has 1/2 spacer above and below the panels. Need extra 1/2
    # spacer top and bottom
    canvas_width = left_text_width + width + 2 * spacer + max_split_panel_width
    figure_size = (canvas_width, total_canvas_height + spacer)
    figure_canvas = Image.new("RGB", figure_size, (255, 255, 255))

    row_y = spacer
    for row, image in enumerate(merged_images):
        label_canvas = figUtil.getVerticalLabels(image_labels[row], font,
                                                 text_gap)
        # centre the vertical label against the merged panel (int offset)
        v_offset = (image.size[1] - label_canvas.size[1]) // 2
        image_utils.paste_image(label_canvas, figure_canvas, spacer // 2,
                                row_y + top_spacers[row] + v_offset)
        image_utils.paste_image(image, figure_canvas, left_text_width,
                                row_y + top_spacers[row])
        x = left_text_width + width + spacer
        image_utils.paste_image(roi_split_panes[row], figure_canvas, x, row_y)
        row_y = row_y + max(image.size[1] + top_spacers[row],
                            roi_split_panes[row].size[1]) + spacer

    return figure_canvas
Code example #6
0
 def test_get_front(self, size):
     """get_font must return a usable font object for the given size."""
     loaded_font = image_utils.get_font(size)
     assert loaded_font is not None
Code example #7
0
def paint_dataset_canvas(conn,
                         images,
                         title,
                         tag_ids=None,
                         show_untagged=False,
                         col_count=10,
                         length=100):
    """
        Paints and returns a canvas of thumbnails from images, laid out in a
        set number of columns.
        Title and date-range of the images is printed above the thumbnails,
        to the left and right, respectively.

        @param conn:          Blitz connection
        @param images:        Image objects to include
        @param title:         title to display at top of figure. String
        @param tag_ids:       Optional to sort thumbnails by tag. [long]
        @param show_untagged: if True, also show images with none of the tags
        @param col_count:     Max number of columns to lay out thumbnails
        @param length:        Length of longest side of thumbnails

        @return               the PIL Image canvas, or None if images empty
    """

    mode = "RGB"
    fig_canvas = None
    # integer division: spacing feeds pixel offsets and Image.new sizes
    spacing = length // 40 + 2

    thumbnail_store = conn.createThumbnailStore()
    metadata_service = conn.getMetadataService()

    if len(images) == 0:
        return None
    timestamp_min = images[0].getDate()  # datetime
    timestamp_max = timestamp_min

    ds_image_ids = []
    image_pixel_map = {}
    image_names = {}

    # sort the images by name
    images.sort(key=lambda x: (x.getName().lower()))

    for image in images:
        image_id = image.getId()
        pixel_id = image.getPrimaryPixels().getId()
        name = image.getName()
        ds_image_ids.append(image_id)  # make a list of image-IDs
        # and a map of image-ID: pixel-ID
        image_pixel_map[image_id] = pixel_id
        image_names[image_id] = name
        timestamp_min = min(timestamp_min, image.getDate())
        timestamp_max = max(timestamp_max, image.getDate())

    # set-up fonts (integer size)
    fontsize = length // 7 + 5
    font = image_utils.get_font(fontsize)
    text_height = font.getsize("Textq")[1]
    top_spacer = spacing + text_height
    left_spacer = spacing + text_height

    tag_panes = []
    max_width = 0
    total_height = top_spacer

    # if we have a list of tags, then sort images by tag
    if tag_ids:
        # Cast to int since List can be any type
        tag_ids = [int(tagId) for tagId in tag_ids]
        log(" Sorting images by tags: %s" % tag_ids)
        tag_names = {}
        tagged_images = {}  # a map of tagId: list-of-image-Ids
        img_tags = {}  # a map of imgId: list-of-tagIds
        for tag_id in tag_ids:
            tagged_images[tag_id] = []

        # look for images that have a tag
        types = ["ome.model.annotations.TagAnnotation"]
        annotations = metadata_service.loadAnnotations("Image", ds_image_ids,
                                                       types, None, None)
        # filter images by annotation...
        for image_id, tags in annotations.items():
            img_tag_ids = []
            for tag in tags:
                tag_id = tag.getId().getValue()
                # make a dict of tag-names
                val = tag.getTextValue().getValue()
                # NOTE(review): assumes val is bytes; under Python 3 the
                # server may already return str -- confirm with OMERO API
                tag_names[tag_id] = val.decode('utf8')
                img_tag_ids.append(tag_id)
            img_tags[image_id] = img_tag_ids

        # get a sorted list of {'iid': iid, 'tagKey': tagKey,
        # 'tagIds':orderedTags}
        sorted_thumbs = sort_images_by_tag(tag_ids, img_tags)

        if not show_untagged:
            sorted_thumbs = [t for t in sorted_thumbs if len(t['tagIds']) > 0]

        # Need to group sets of thumbnails by FIRST tag.
        toptag_sets = []
        grouped_pixel_ids = []
        show_subset_labels = False
        current_tag_str = None
        for i, img in enumerate(sorted_thumbs):
            tag_ids = img['tagIds']
            if len(tag_ids) == 0:
                tag_string = "Not Tagged"
            else:
                tag_string = tag_names[tag_ids[0]]
            if tag_string == current_tag_str or current_tag_str is None:
                # only show subset labels (later) if there are more than 1
                # subset
                if (len(tag_ids) > 1):
                    show_subset_labels = True
                grouped_pixel_ids.append({
                    'pid': image_pixel_map[img['iid']],
                    'tagIds': tag_ids
                })
            else:
                toptag_sets.append({
                    'tagText': current_tag_str,
                    'pixelIds': grouped_pixel_ids,
                    'showSubsetLabels': show_subset_labels
                })
                show_subset_labels = len(tag_ids) > 1
                grouped_pixel_ids = [{
                    'pid': image_pixel_map[img['iid']],
                    'tagIds': tag_ids
                }]
            current_tag_str = tag_string
        toptag_sets.append({
            'tagText': current_tag_str,
            'pixelIds': grouped_pixel_ids,
            'showSubsetLabels': show_subset_labels
        })

        # Find the indent we need
        max_tag_name_width = max(
            [font.getsize(ts['tagText'])[0] for ts in toptag_sets])
        if show_untagged:
            max_tag_name_width = max(max_tag_name_width,
                                     font.getsize("Not Tagged")[0])

        tag_sub_panes = []

        # make a canvas for each tag combination
        def make_tagset_canvas(tag_string, tagset_pix_ids, show_subset_labels):
            log(" Tagset: %s  (contains %d images)" %
                (tag_string, len(tagset_pix_ids)))
            if not show_subset_labels:
                tag_string = None
            sub_canvas = image_utils.paint_thumbnail_grid(thumbnail_store,
                                                          length,
                                                          spacing,
                                                          tagset_pix_ids,
                                                          col_count,
                                                          top_label=tag_string)
            tag_sub_panes.append(sub_canvas)

        for toptag_set in toptag_sets:
            tag_text = toptag_set['tagText']
            show_subset_labels = toptag_set['showSubsetLabels']
            image_data = toptag_set['pixelIds']
            # loop through all thumbs under TAG, grouping into subsets.
            tagset_pix_ids = []
            current_tag_str = None
            for i, img in enumerate(image_data):
                tag_ids = img['tagIds']
                pid = img['pid']
                tag_string = ", ".join([tag_names[tid] for tid in tag_ids])
                if tag_string == "":
                    tag_string = "Not Tagged"
                # Keep grouping thumbs under similar tag set (if not on the
                # last loop)
                if tag_string == current_tag_str or current_tag_str is None:
                    tagset_pix_ids.append(pid)
                else:
                    # Process thumbs added so far
                    make_tagset_canvas(current_tag_str, tagset_pix_ids,
                                       show_subset_labels)
                    # reset for next tagset
                    tagset_pix_ids = [pid]
                current_tag_str = tag_string

            make_tagset_canvas(current_tag_str, tagset_pix_ids,
                               show_subset_labels)

            max_width = max([c.size[0] for c in tag_sub_panes])
            total_height = sum([c.size[1] for c in tag_sub_panes])

            # paste them into a single canvas for each Tag

            left_spacer = 3 * spacing + max_tag_name_width
            # Draw vertical line to right
            size = (left_spacer + max_width, total_height)
            tag_canvas = Image.new(mode, size, WHITE)
            # create draw unconditionally: the vertical line below is drawn
            # even when this tag set has no text (previously a NameError)
            draw = ImageDraw.Draw(tag_canvas)
            p_x = left_spacer
            p_y = 0
            for pane in tag_sub_panes:
                image_utils.paste_image(pane, tag_canvas, p_x, p_y)
                p_y += pane.size[1]
            if tag_text is not None:
                tt_w, tt_h = font.getsize(tag_text)
                h_offset = (total_height - tt_h) // 2
                draw.text((spacing, h_offset),
                          tag_text,
                          font=font,
                          fill=(50, 50, 50))
            # draw vertical line
            draw.line((left_spacer - spacing, 0, left_spacer - spacing,
                       total_height),
                      fill=(0, 0, 0))
            tag_panes.append(tag_canvas)
            tag_sub_panes = []
    else:
        left_spacer = spacing
        pixel_ids = []
        for image_id in ds_image_ids:
            log("  Name: %s  ID: %d" % (image_names[image_id], image_id))
            pixel_ids.append(image_pixel_map[image_id])
        fig_canvas = image_utils.paint_thumbnail_grid(thumbnail_store, length,
                                                      spacing, pixel_ids,
                                                      col_count)
        tag_panes.append(fig_canvas)

    # paste them into a single canvas
    tagset_spacer = length // 3
    max_width = max([c.size[0] for c in tag_panes])
    total_height = total_height + sum(
        [c.size[1] + tagset_spacer for c in tag_panes]) - tagset_spacer
    size = (max_width, total_height)
    full_canvas = Image.new(mode, size, WHITE)
    p_x = 0
    p_y = top_spacer
    for pane in tag_panes:
        image_utils.paste_image(pane, full_canvas, p_x, p_y)
        p_y += pane.size[1] + tagset_spacer

    # create dates for the image timestamps. If dates are not the same, show
    # first - last.
    # firstdate = timestampMin
    # lastdate = timestampMax
    # figureDate = str(firstdate)
    # if firstdate != lastdate:
    #     figureDate = "%s - %s" % (firstdate, lastdate)

    draw = ImageDraw.Draw(full_canvas)
    # dateWidth = draw.textsize(figureDate, font=font)[0]
    # titleWidth = draw.textsize(title, font=font)[0]
    # dateX = fullCanvas.size[0] - spacing - dateWidth
    # title
    draw.text((left_spacer, spacing), title, font=font, fill=(0, 0, 0))
    # Don't show dates: see
    # https://github.com/openmicroscopy/openmicroscopy/pull/1002
    # if (leftSpacer+titleWidth) < dateX:
    # if there's enough space...
    #     draw.text((dateX, dateY), figureDate, font=font, fill=(0,0,0))
    # add date

    return full_canvas
Code example #8
0
def get_roi_movie_view(re,
                       query_service,
                       pixels,
                       time_shape_map,
                       merged_indexes,
                       merged_colours,
                       roi_width,
                       roi_height,
                       roi_zoom,
                       spacer=12,
                       algorithm=None,
                       stepping=1,
                       font_size=24,
                       max_columns=None,
                       show_roi_duration=False):
    """
    This takes a ROI rectangle from an image and makes a movie canvas of the
    region in the ROI, zoomed by a defined factor.

    @param re                 rendering engine (loaded on return)
    @param query_service      service used to look up time labels
    @param pixels             the Pixels object of the image
    @param time_shape_map     map of tIndex: (x, y, zMin, zMax) for the ROI
    @param merged_indexes     channel indexes to render in the merged image
    @param merged_colours     index: (r,g,b,a) colour overrides per channel
    @param roi_width          width of the ROI region in pixels
    @param roi_height         height of the ROI region in pixels
    @param roi_zoom           zoom factor applied to each cropped panel
    @param spacer             gap in pixels between panels
    @param algorithm          projection algorithm e.g. "MAXIMUMINTENSITY"
    @param stepping           projection z-step
    @param font_size          size of the time-label font
    @param max_columns        wrap panels onto new rows after this many
    @param show_roi_duration  if True, times are relative to ROI start

    @raise Exception          if no rendering def can be looked up
    @return                   tuple of (canvas, first full frame, top spacer)
    """

    mode = "RGB"
    white = (255, 255, 255)

    size_x = pixels.getSizeX().getValue()
    size_y = pixels.getSizeY().getValue()
    size_z = pixels.getSizeZ().getValue()
    size_c = pixels.getSizeC().getValue()
    size_t = pixels.getSizeT().getValue()

    if pixels.getPhysicalSizeX():
        physical_x = pixels.getPhysicalSizeX().getValue()
    else:
        physical_x = 0
    if pixels.getPhysicalSizeY():
        physical_y = pixels.getPhysicalSizeY().getValue()
    else:
        physical_y = 0
    log("  Pixel size (um): x: %s  y: %s" % (str(physical_x), str(physical_y)))
    log("  Image dimensions (pixels): x: %d  y: %d" % (size_x, size_y))
    log(" Projecting Movie Frame ROIs...")

    # set up rendering engine with the pixels
    pixels_id = pixels.getId().getValue()
    re.lookupPixels(pixels_id)
    if not re.lookupRenderingDef(pixels_id):
        re.resetDefaults()
    if not re.lookupRenderingDef(pixels_id):
        # raising a string is a TypeError in Python 3; use an Exception
        raise Exception("Failed to lookup Rendering Def")
    re.load()

    # now get each channel in greyscale (or colour)
    # a list of renderedImages (data as Strings) for the split-view row
    rendered_images = []
    panel_width = 0
    channel_mismatch = False
    # first, turn off all channels in pixels
    for i in range(size_c):
        re.setActive(i, False)

    # turn on channels in mergedIndexes.
    for i in merged_indexes:
        if i >= size_c or i < 0:
            channel_mismatch = True
        else:
            re.setActive(i, True)
            if i in merged_colours:
                rgba = merged_colours[i]
                re.setRGBA(i, *rgba)

    # get the combined image, using the existing rendering settings
    channels_string = ", ".join([str(i) for i in merged_indexes])
    log("  Rendering Movie channels: %s" % channels_string)

    time_indexes = list(time_shape_map.keys())
    time_indexes.sort()

    if show_roi_duration:
        log(" Timepoints shown are ROI duration, not from start of movie")
    time_labels = figUtil.getTimeLabels(query_service, pixels_id, time_indexes,
                                        size_t, None, show_roi_duration)
    # The last value of the list will be the Units used to display time

    full_first_frame = None
    for t, timepoint in enumerate(time_indexes):
        roi_x, roi_y, pro_start, pro_end = time_shape_map[timepoint]
        box = (roi_x, roi_y, int(roi_x + roi_width), int(roi_y + roi_height))
        log("  Time-index: %d Time-label: %s  Projecting z range: %d - %d "
            "(max Z is %d) of region x: %s y: %s" %
            (timepoint + 1, time_labels[t], pro_start + 1, pro_end + 1, size_z,
             roi_x, roi_y))

        merged = re.renderProjectedCompressed(algorithm, timepoint, stepping,
                                              pro_start, pro_end)
        full_merged_image = Image.open(io.BytesIO(merged))
        if full_first_frame is None:
            full_first_frame = full_merged_image
        roi_merged_image = full_merged_image.crop(box)
        # make sure this is not just a lazy copy of the full image
        roi_merged_image.load()
        if roi_zoom != 1:
            new_size = (int(roi_width * roi_zoom), int(roi_height * roi_zoom))
            roi_merged_image = roi_merged_image.resize(new_size)
        panel_width = roi_merged_image.size[0]
        rendered_images.append(roi_merged_image)

    if channel_mismatch:
        log(" WARNING channel mismatch: The current image has fewer channels"
            " than the primary image.")

    # now assemble the roi split-view canvas, with space above for text
    col_count = len(rendered_images)
    row_count = 1
    if max_columns:
        row_count = col_count // max_columns
        if (col_count % max_columns) > 0:
            row_count += 1
        col_count = max_columns
    font = image_utils.get_font(font_size)
    text_height = font.getsize("Textq")[1]
    # no spaces around panels
    canvas_width = ((panel_width + spacer) * col_count) - spacer
    # NOTE(review): assumes time_shape_map is non-empty -- rendered_images[0]
    # would raise IndexError otherwise; confirm against callers
    row_height = rendered_images[0].size[1] + spacer + text_height
    canvas_height = row_height * row_count
    size = (canvas_width, canvas_height)
    # create a canvas of appropriate width, height
    canvas = Image.new(mode, size, white)

    px = 0
    text_y = spacer // 2
    panel_y = text_height + spacer
    # paste the images in, with time labels
    draw = ImageDraw.Draw(canvas)

    col = 0
    for i, img in enumerate(rendered_images):
        label = time_labels[i]
        # centre each time label above its panel
        indent = (panel_width - (font.getsize(label)[0])) // 2
        draw.text((px + indent, text_y), label, font=font, fill=(0, 0, 0))
        image_utils.paste_image(img, canvas, px, panel_y)
        if col == (col_count - 1):
            # wrap to the start of the next row
            col = 0
            px = 0
            text_y += row_height
            panel_y += row_height
        else:
            col += 1
            px = px + panel_width + spacer

    # return the roi splitview canvas, as well as the full merged image
    return (canvas, full_first_frame, text_height + spacer)