Example n. 1
0
 def test_resize_image(self, size):
     """Resizing a 512x512 RGB test image yields the requested dimensions."""
     src_w, src_h = 512, 512
     pixels = numpy.zeros((src_h, src_w, 3), dtype=numpy.uint8)
     # single red pixel in the centre so the image is not uniformly black
     pixels[256, 256] = [255, 0, 0]
     source = Image.fromarray(pixels, 'RGB')
     resized = image_utils.resize_image(source, size[0], size[1])
     assert resized is not None
     out_w, out_h = resized.size
     assert out_w == size[0]
     assert out_h == size[1]
Example n. 2
0
def createmovie_figure(conn, pixel_ids, t_indexes, z_start, z_end, width,
                       height, spacer, algorithm, stepping, scalebar,
                       overlay_colour, time_units, image_labels,
                       max_col_count):
    """
    Makes the complete Movie figure: A canvas showing an image per row with
    multiple columns showing frames from each image/movie. Labels above each
    frame show the time-stamp of that frame in the specified units and
    labels on the left name each image.

    @param conn             The OMERO session
    @param pixel_ids        A list of the Pixel IDs for the images in the
                            figure
    @param t_indexes        A list of tIndexes to display frames from
    @param z_start          Projection Z-start
    @param z_end            Projection Z-end
    @param width            Maximum width of panels
    @param height           Max height of panels
    @param spacer           Space between panels
    @param algorithm        Projection algorithm e.g. "MAXIMUMINTENSITY"
    @param stepping         Projection z-step
    @param scalebar         A number of microns for scale-bar
    @param overlay_colour   Color of the scale bar as tuple (255,255,255)
    @param time_units       A string such as "SECS"
    @param image_labels     A list of lists, corresponding to pixelIds, for
                            labelling each image with one or more strings.
    @param max_col_count    Maximum number of frame columns per row before
                            wrapping onto a new line of panels
    @return                 The assembled figure as a PIL Image
    """

    mode = "RGB"
    white = (255, 255, 255)

    # create a rendering engine
    re = conn.createRenderingEngine()
    query_service = conn.getQueryService()

    row_panels = []
    total_height = 0
    total_width = 0
    max_image_width = 0
    physical_size_x = 0

    for row, pixels_id in enumerate(pixel_ids):
        log("Rendering row %d" % (row))

        pixels = query_service.get("Pixels", pixels_id)
        size_x = pixels.getSizeX().getValue()
        size_y = pixels.getSizeY().getValue()
        size_z = pixels.getSizeZ().getValue()
        size_t = pixels.getSizeT().getValue()

        if pixels.getPhysicalSizeX():
            physical_x = pixels.getPhysicalSizeX().getValue()
            units_x = pixels.getPhysicalSizeX().getSymbol()
        else:
            physical_x = 0
            units_x = ""
        if pixels.getPhysicalSizeY():
            physical_y = pixels.getPhysicalSizeY().getValue()
            units_y = pixels.getPhysicalSizeY().getSymbol()
        else:
            physical_y = 0
            units_y = ""
        log("  Pixel size: x: %s %s  y: %s %s" %
            (str(physical_x), units_x, str(physical_y), units_y))
        if row == 0:  # set values for primary image
            physical_size_x = physical_x
            physical_size_y = physical_y
        else:  # compare primary image with current one
            if physical_size_x != physical_x or physical_size_y != physical_y:
                log(" WARNING: Images have different pixel lengths. Scales"
                    " are not comparable.")

        log("  Image dimensions (pixels): x: %d  y: %d" % (size_x, size_y))
        max_image_width = max(max_image_width, size_x)

        # set up rendering engine with the pixels
        re.lookupPixels(pixels_id)
        if not re.lookupRenderingDef(pixels_id):
            re.resetDefaults()
        if not re.lookupRenderingDef(pixels_id):
            # raising a string is a TypeError in modern Python - raise a
            # real exception instance instead
            raise Exception("Failed to lookup Rendering Def")
        re.load()

        pro_start = z_start
        pro_end = z_end
        # make sure we're within Z range for projection.
        if pro_end >= size_z:
            pro_end = size_z - 1
            if pro_start > size_z:
                pro_start = 0
            log(" WARNING: Current image has fewer Z-sections than the"
                " primary image.")

        # if we have an invalid z-range (start or end less than 0), show
        # default Z only
        if pro_start < 0 or pro_end < 0:
            pro_start = re.getDefaultZ()
            pro_end = pro_start
            log("  Display Z-section: %d" % (pro_end + 1))
        else:
            log("  Projecting z range: %d - %d   (max Z is %d)" %
                (pro_start + 1, pro_end + 1, size_z))

        # now get each channel in greyscale (or colour)
        # a list of renderedImages (data as Strings) for the split-view row
        rendered_images = []

        for time in t_indexes:
            if time >= size_t:
                log(" WARNING: This image does not have Time frame: %d. "
                    "(max is %d)" % (time + 1, size_t))
            else:
                if pro_start != pro_end:
                    rendered_img = re.renderProjectedCompressed(
                        algorithm, time, stepping, pro_start, pro_end)
                else:
                    plane_def = omero.romio.PlaneDef()
                    plane_def.z = pro_start
                    plane_def.t = time
                    rendered_img = re.renderCompressed(plane_def)
                # create images and resize, add to list
                image = Image.open(io.BytesIO(rendered_img))
                resized_image = image_utils.resize_image(image, width, height)
                rendered_images.append(resized_image)

        # make a canvas for the row of splitview images...
        # (will add time labels above each row)
        col_count = min(max_col_count, len(rendered_images))
        row_count = int(math.ceil(float(len(rendered_images)) / col_count))
        # use integer division throughout the layout maths: PIL sizes and
        # pixel coordinates must be ints (true division gives floats on Py3)
        font = image_utils.get_font(width // 12)
        font_height = font.getsize("Textq")[1]
        canvas_width = ((width + spacer) * col_count) + spacer
        canvas_height = row_count * (spacer // 2 + font_height + spacer +
                                     height)
        size = (canvas_width, canvas_height)
        # create a canvas of appropriate width, height
        canvas = Image.new(mode, size, white)

        # add text labels
        query_service = conn.getQueryService()
        text_x = spacer
        text_y = spacer // 4
        col_index = 0
        time_labels = figUtil.getTimeLabels(query_service, pixels_id,
                                            t_indexes, size_t, time_units)
        for t, t_index in enumerate(t_indexes):
            if t_index >= size_t:
                continue
            time = time_labels[t]
            text_w = font.getsize(time)[0]
            # centre the label over its panel
            inset = (width - text_w) // 2
            textdraw = ImageDraw.Draw(canvas)
            textdraw.text((text_x + inset, text_y),
                          time,
                          font=font,
                          fill=(0, 0, 0))
            text_x += width + spacer
            col_index += 1
            if col_index >= max_col_count:
                # wrap to the next line of panels
                col_index = 0
                text_x = spacer
                text_y += (spacer // 2 + font_height + spacer + height)

        # add scale bar to last frame...
        if scalebar:
            scaled_image = rendered_images[-1]
            x_indent = spacer
            y_indent = x_indent
            # if we've scaled to half size, zoom = 2
            zoom = image_utils.get_zoom_factor(scaled_image.size, width,
                                               height)
            # and the scale bar will be half size
            sbar = float(scalebar) / zoom
            status, log_msg = figUtil.addScalebar(sbar, x_indent, y_indent,
                                                  scaled_image, pixels,
                                                  overlay_colour)
            log(log_msg)

        px = spacer
        py = spacer + font_height
        col_index = 0
        # paste the images in
        for i, img in enumerate(rendered_images):
            image_utils.paste_image(img, canvas, px, py)
            px = px + width + spacer
            col_index += 1
            if col_index >= max_col_count:
                # wrap to the next line of panels
                col_index = 0
                px = spacer
                py += (spacer // 2 + font_height + spacer + height)

        # Add labels to the left of the panel
        canvas = add_left_labels(canvas, image_labels, row, width, spacer)

        # most should be same width anyway
        total_width = max(total_width, canvas.size[0])
        # add together the heights of each row
        total_height = total_height + canvas.size[1]

        row_panels.append(canvas)

    # make a figure to combine all split-view rows
    # each row has 1/2 spacer above and below the panels. Need extra 1/2
    # spacer top and bottom
    figure_size = (total_width, total_height + spacer)
    figure_canvas = Image.new(mode, figure_size, white)

    row_y = spacer // 2
    for row in row_panels:
        image_utils.paste_image(row, figure_canvas, 0, row_y)
        row_y = row_y + row.size[1]

    return figure_canvas
Example n. 3
0
def get_split_view(conn,
                   pixel_ids,
                   z_start,
                   z_end,
                   split_indexes,
                   channel_names,
                   colour_channels,
                   merged_indexes,
                   merged_colours,
                   width=None,
                   height=None,
                   spacer=12,
                   algorithm=None,
                   stepping=1,
                   scalebar=None,
                   overlay_colour=(255, 255, 255)):
    """
    This method makes a figure of a number of images, arranged in rows with
    each row being the split-view of a single image. The channels are arranged
    left to right, with the combined image added on the right.
    The combined image is rendered according to current settings on the
    server, but its channels will be turned on/off according to
    @merged_indexes.
    No text labels are added to the image at this stage.

    The figure is returned as a PIL 'Image'

    @ conn              session for server access
    @ pixel_ids         a list of the Ids for the pixels we want to display
    @ z_start           the start of Z-range for projection
    @ z_end             the end of Z-range for projection
    @ split_indexes     a list of the channel indexes to display. Same
                        channels for each image/row
    @ channel_names     the Map of index:names to go above the columns for
                        each split channel
    @ colour_channels   the colour to make each column/ channel
    @ merged_indexes    list or set of channels in the merged image
    @ merged_colours    index: colour dictionary of channels in the merged
                        image
    @ width             the size in pixels to show each panel
    @ height            the size in pixels to show each panel
    @ spacer            the gap between images and around the figure. Doubled
                        between rows.
    """

    if algorithm is None:  # omero::constants::projection::ProjectionType
        algorithm = ProjectionType.MAXIMUMINTENSITY
    timepoint = 0
    mode = "RGB"
    white = (255, 255, 255)

    # create a rendering engine
    re = conn.createRenderingEngine()
    query_service = conn.getQueryService()

    row_panels = []
    total_height = 0
    total_width = 0
    max_image_width = 0

    physical_size_x = 0

    log("Split View Rendering Log...")

    if z_start > -1 and z_end > -1:
        al_string = str(algorithm).replace("INTENSITY",
                                           " Intensity").capitalize()
        log("All images projected using '%s' projection with step size: "
            "%d  start: %d  end: %d" %
            (al_string, stepping, z_start + 1, z_end + 1))
    else:
        log("Images show last-viewed Z-section")

    for row, pixels_id in enumerate(pixel_ids):
        log("Rendering row %d" % (row + 1))

        pixels = query_service.get("Pixels", pixels_id)
        size_x = pixels.getSizeX().getValue()
        size_y = pixels.getSizeY().getValue()
        size_z = pixels.getSizeZ().getValue()
        size_c = pixels.getSizeC().getValue()

        if pixels.getPhysicalSizeX():
            physical_x = pixels.getPhysicalSizeX().getValue()
        else:
            physical_x = 0
        if pixels.getPhysicalSizeY():
            physical_y = pixels.getPhysicalSizeY().getValue()
        else:
            physical_y = 0
        log("  Pixel size (um): x: %.3f  y: %.3f" % (physical_x, physical_y))
        if row == 0:  # set values for primary image
            physical_size_x = physical_x
            physical_size_y = physical_y
        else:  # compare primary image with current one
            if physical_size_x != physical_x or physical_size_y != physical_y:
                log(" WARNING: Images have different pixel lengths."
                    " Scales are not comparable.")

        log("  Image dimensions (pixels): x: %d  y: %d" % (size_x, size_y))
        max_image_width = max(max_image_width, size_x)

        # set up rendering engine with the pixels
        re.lookupPixels(pixels_id)
        if not re.lookupRenderingDef(pixels_id):
            re.resetDefaults()
        if not re.lookupRenderingDef(pixels_id):
            # raising a string is a TypeError in modern Python - raise a
            # real exception instance instead
            raise Exception("Failed to lookup Rendering Def")
        re.load()

        pro_start = z_start
        pro_end = z_end
        # make sure we're within Z range for projection.
        if pro_end >= size_z:
            pro_end = size_z - 1
            if pro_start > size_z:
                pro_start = 0
            log(" WARNING: Current image has fewer Z-sections than the"
                " primary image.")

        # if we have an invalid z-range (start or end less than 0), show
        # default Z only
        if pro_start < 0 or pro_end < 0:
            pro_start = re.getDefaultZ()
            pro_end = pro_start
            log("  Display Z-section: %d" % (pro_end + 1))
        else:
            log("  Projecting z range: %d - %d   (max Z is %d)" %
                (pro_start + 1, pro_end + 1, size_z))

        # initialise BEFORE the merged-channel loop below: previously this
        # flag was reset after that loop, silently discarding any mismatch
        # detected among the merged indexes
        channel_mismatch = False

        # turn on channels in merged_indexes
        for i in range(size_c):
            re.setActive(i, False)  # Turn all off first
        log("Turning on merged_indexes: %s ..." % merged_indexes)
        for i in merged_indexes:
            if i >= size_c:
                channel_mismatch = True
            else:
                re.setActive(i, True)
                if i in merged_colours:
                    re.setRGBA(i, *merged_colours[i])

        # get the combined image, using the existing rendering settings
        channels_string = ", ".join([channel_names[i] for i in merged_indexes])
        log("  Rendering merged channels: %s" % channels_string)
        if pro_start != pro_end:
            overlay = re.renderProjectedCompressed(algorithm, timepoint,
                                                   stepping, pro_start,
                                                   pro_end)
        else:
            plane_def = omero.romio.PlaneDef()
            plane_def.z = pro_start
            plane_def.t = timepoint
            overlay = re.renderCompressed(plane_def)

        # now get each channel in greyscale (or colour)
        # a list of renderedImages (data as Strings) for the split-view row
        rendered_images = []
        # first, turn off all channels in pixels
        for i in range(size_c):
            re.setActive(i, False)

        # for each channel in the splitview...
        for index in split_indexes:
            if index >= size_c:
                # can't turn channel on - simply render black square!
                channel_mismatch = True
                rendered_images.append(None)
            else:
                re.setActive(index, True)  # turn channel on
                if colour_channels:  # if split channels are coloured...
                    if index in merged_indexes:
                        # and this channel is in the combined image
                        if index in merged_colours:
                            rgba = tuple(merged_colours[index])
                            re.setRGBA(index, *rgba)  # set coloured
                        else:
                            merged_colours[index] = re.getRGBA(index)
                    else:
                        # otherwise set white (max alpha)
                        re.setRGBA(index, 255, 255, 255, 255)
                else:
                    # if not colour_channels - channels are white
                    re.setRGBA(index, 255, 255, 255, 255)
                info = (index, re.getChannelWindowStart(index),
                        re.getChannelWindowEnd(index))
                log("  Render channel: %s  start: %d  end: %d" % info)
                if pro_start != pro_end:
                    rendered_img = re.renderProjectedCompressed(
                        algorithm, timepoint, stepping, pro_start, pro_end)
                else:
                    plane_def = omero.romio.PlaneDef()
                    plane_def.z = pro_start
                    plane_def.t = timepoint
                    rendered_img = re.renderCompressed(plane_def)
                rendered_images.append(rendered_img)
            if index < size_c:
                re.setActive(index, False)  # turn the channel off again!

        if channel_mismatch:
            log(" WARNING channel mismatch: The current image has fewer"
                " channels than the primary image.")

        # make a canvas for the row of splitview images...
        # extra image for combined image
        image_count = len(rendered_images) + 1
        canvas_width = ((width + spacer) * image_count) + spacer
        canvas_height = spacer + height
        size = (canvas_width, canvas_height)
        # create a canvas of appropriate width, height
        canvas = Image.new(mode, size, white)

        px = spacer
        # integer division: pixel coordinates must be ints (true division
        # gives floats on Py3)
        py = spacer // 2
        col = 0
        # paste the images in
        for img in rendered_images:
            if img is None:
                im = Image.new(mode, (size_x, size_y), (0, 0, 0))
            else:
                im = Image.open(io.BytesIO(img))
            i = image_utils.resize_image(im, width, height)
            image_utils.paste_image(i, canvas, px, py)
            px = px + width + spacer
            col = col + 1

        # add combined image, after resizing and adding scale bar
        i = Image.open(io.BytesIO(overlay))
        scaled_image = image_utils.resize_image(i, width, height)
        if scalebar:
            x_indent = spacer
            y_indent = x_indent
            # if we've scaled to half size, zoom = 2
            zoom = image_utils.get_zoom_factor(i.size, width, height)
            # and the scale bar will be half size
            sbar = float(scalebar) / zoom
            status, log_msg = figUtil.addScalebar(sbar, x_indent, y_indent,
                                                  scaled_image, pixels,
                                                  overlay_colour)
            log(log_msg)

        image_utils.paste_image(scaled_image, canvas, px, py)

        # most should be same width anyway
        total_width = max(total_width, canvas_width)
        # add together the heights of each row
        total_height = total_height + canvas_height
        row_panels.append(canvas)

    # make a figure to combine all split-view rows
    # each row has 1/2 spacer above and below the panels. Need extra 1/2
    # spacer top and bottom
    figure_size = (total_width, total_height + spacer)
    figure_canvas = Image.new(mode, figure_size, white)

    row_y = spacer // 2
    for row in row_panels:
        image_utils.paste_image(row, figure_canvas, 0, row_y)
        row_y = row_y + row.size[1]

    return figure_canvas
Example n. 4
0
def get_split_view(conn, image_ids, pixel_ids, split_indexes, channel_names,
                   merged_names, colour_channels, merged_indexes,
                   merged_colours, width, height, image_labels, spacer,
                   algorithm, stepping, scalebar, overlay_colour, roi_zoom,
                   roi_label):
    """
    This method makes a figure of a number of images, arranged in rows with
    each row being the split-view of a single image. The channels are arranged
    left to right, with the combined image added on the right.
    The combined image is rendered according to current settings on the
    server, but its channels will be turned on/off according to
    @merged_indexes.

    The figure is returned as a PIL 'Image'

    @ conn              session for server access
    @ image_ids         a list of Image Ids, parallel to pixel_ids, used to
                        look up the named ROI on each image
    @ pixel_ids         a list of the Ids for the pixels we want to display
    @ split_indexes     a list of the channel indexes to display. Same
                        channels for each image/row
    @ channel_names     the Map of index:names for all channels
    @ merged_names      names for the merged channels (passed through to
                        get_roi_split_view)
    @ colour_channels   the colour to make each column/ channel
    @ merged_indexes    list or set of channels in the merged image
    @ merged_colours    index: colour dictionary of channels in the merged
                        image
    @ width             the size in pixels to show each panel
    @ height            the size in pixels to show each panel
    @ image_labels      a list of lists of strings, one list per image row,
                        drawn to the left of each row
    @ spacer            the gap between images and around the figure. Doubled
                        between rows.
    @ roi_zoom          zoom factor for the ROI panels; if None it is derived
                        from the primary image's ROI height
    @ roi_label         the ROI name/label to look up on each image
    """

    roi_service = conn.getRoiService()
    re = conn.createRenderingEngine()
    query_service = conn.getQueryService()  # only needed for movie

    # establish dimensions and roiZoom for the primary image
    # getTheseValues from the server
    rect = get_rectangle(roi_service, image_ids[0], roi_label)
    if rect is None:
        raise Exception("No ROI found for the first image.")
    roi_x, roi_y, roi_width, roi_height, z_min, z_max, t_min, t_max = rect

    # outline thickness scales with panel size (integer division: pixel
    # widths must be ints)
    roi_outline = ((max(width, height)) // 200) + 1

    if roi_zoom is None:
        # derive the zoom from the primary image's ROI height so the zoomed
        # ROI panel matches the panel height
        roi_zoom = float(height) / float(roi_height)
        log("ROI zoom set by primary image is %F X" % roi_zoom)
    else:
        log("ROI zoom: %F X" % roi_zoom)

    text_gap = spacer // 3
    fontsize = 12
    if width > 500:
        fontsize = 48
    elif width > 400:
        fontsize = 36
    elif width > 300:
        fontsize = 24
    elif width > 200:
        fontsize = 16
    font = image_utils.get_font(fontsize)
    text_height = font.getsize("Textq")[1]
    # widest label list determines the space reserved left of the panels
    max_count = 0
    for row in image_labels:
        max_count = max(max_count, len(row))
    left_text_width = (text_height + text_gap) * max_count + spacer

    max_split_panel_width = 0
    total_canvas_height = 0
    merged_images = []
    roi_split_panes = []
    top_spacers = []  # space for labels above each row

    show_labels_above_every_row = False
    invalid_images = []  # note any image row indexes that don't have ROIs.

    for row, pixels_id in enumerate(pixel_ids):
        log("Rendering row %d" % (row))

        if show_labels_above_every_row:
            show_top_labels = True
        else:
            show_top_labels = (row == 0)  # only show top labels for first row

        # need to get the roi dimensions from the server
        image_id = image_ids[row]
        roi = get_rectangle(roi_service, image_id, roi_label)
        if roi is None:
            log("No Rectangle ROI found for this image")
            invalid_images.append(row)
            continue

        roi_x, roi_y, roi_width, roi_height, z_min, z_max, t_start, t_end = roi

        pixels = query_service.get("Pixels", pixels_id)
        size_x = pixels.getSizeX().getValue()
        size_y = pixels.getSizeY().getValue()

        z_start = z_min
        z_end = z_max

        # work out if any additional zoom is needed (if the full-sized image
        # is different size from primary image)
        full_size = (size_x, size_y)
        image_zoom = image_utils.get_zoom_factor(full_size, width, height)
        if image_zoom != 1.0:
            log("  Scaling down the full-size image by a factor of %F" %
                image_zoom)

        log("  ROI location (top-left) x: %d  y: %d  and size width:"
            " %d  height: %d" % (roi_x, roi_y, roi_width, roi_height))
        log("  ROI time: %d - %d   zRange: %d - %d" %
            (t_start + 1, t_end + 1, z_start + 1, z_end + 1))
        # get the split pane and full merged image
        roi_split_pane, full_merged_image, top_spacer = get_roi_split_view(
            re, pixels, z_start, z_end, split_indexes, channel_names,
            merged_names, colour_channels, merged_indexes, merged_colours,
            roi_x, roi_y, roi_width, roi_height, roi_zoom, t_start, spacer,
            algorithm, stepping, fontsize, show_top_labels)

        # and now zoom the full-sized merged image, add scalebar
        merged_image = image_utils.resize_image(full_merged_image, width,
                                                height)
        if scalebar:
            x_indent = spacer
            y_indent = x_indent
            # and the scale bar will be half size
            sbar = float(scalebar) / image_zoom
            status, log_msg = figUtil.addScalebar(sbar, x_indent, y_indent,
                                                  merged_image, pixels,
                                                  overlay_colour)
            log(log_msg)

        # draw ROI onto mergedImage...
        # recalculate roi if the image has been zoomed
        x = roi_x / image_zoom
        y = roi_y / image_zoom
        roi_x2 = (roi_x + roi_width) / image_zoom
        roi_y2 = (roi_y + roi_height) / image_zoom
        draw_rectangle(merged_image, x, y, roi_x2, roi_y2, overlay_colour,
                       roi_outline)

        # note the maxWidth of zoomed panels and total height for row
        max_split_panel_width = max(max_split_panel_width,
                                    roi_split_pane.size[0])
        total_canvas_height += spacer + max(height + top_spacer,
                                            roi_split_pane.size[1])

        merged_images.append(merged_image)
        roi_split_panes.append(roi_split_pane)
        top_spacers.append(top_spacer)

    # remove the labels for the invalid images (without ROIs)
    invalid_images.reverse()
    for row in invalid_images:
        del image_labels[row]

    # make a figure to combine all split-view rows
    # each row has 1/2 spacer above and below the panels. Need extra 1/2
    # spacer top and bottom
    canvas_width = left_text_width + width + 2 * spacer + max_split_panel_width
    figure_size = (canvas_width, total_canvas_height + spacer)
    figure_canvas = Image.new("RGB", figure_size, (255, 255, 255))

    row_y = spacer
    for row, image in enumerate(merged_images):
        label_canvas = figUtil.getVerticalLabels(image_labels[row], font,
                                                 text_gap)
        # vertically centre the labels against the merged image panel
        v_offset = (image.size[1] - label_canvas.size[1]) // 2
        image_utils.paste_image(label_canvas, figure_canvas, spacer // 2,
                                row_y + top_spacers[row] + v_offset)
        image_utils.paste_image(image, figure_canvas, left_text_width,
                                row_y + top_spacers[row])
        x = left_text_width + width + spacer
        image_utils.paste_image(roi_split_panes[row], figure_canvas, x, row_y)
        row_y = row_y + max(image.size[1] + top_spacers[row],
                            roi_split_panes[row].size[1]) + spacer

    return figure_canvas