Example #1
def _rasterize_matplotlib(
    image,
    pclouds,
    render_lines=True,
    line_style="-",
    line_colour="b",
    line_width=1,
    render_markers=True,
    marker_style="o",
    marker_size=1,
    marker_face_colour="b",
    marker_edge_colour="b",
    marker_edge_width=1,
):
    import matplotlib.pyplot as plt

    # TODO: Since upgrading to Matplotlib 2.0 it seems that the rendering acts
    #       strangely with respect to pixel accurate rendering. The lines
    #       appear to be rendering too short - but seem correct when we reset
    #       to the classic style - so obviously there is some matplotlib setting
    #       we could just manually set to get the correct behaviour but it is
    #       a chore to find so I'm pragmatically maintaining the old behaviour
    #       here with this contextmanager.
    with plt.style.context("classic"):
        # Convert image shape into 100 DPI inches
        # This makes sure we maintain the original image size
        image_shape = np.array(image.shape)[::-1] / 100.0
        f = plt.figure(figsize=image_shape, frameon=False, dpi=100)

        image.view(figure_id=f.number, figure_size=image_shape)
        for k, p in enumerate(pclouds):
            p.view(
                figure_id=f.number,
                render_axes=False,
                figure_size=image_shape,
                render_lines=render_lines[k],
                line_style=line_style[k],
                line_colour=line_colour[k],
                line_width=line_width[k],
                render_markers=render_markers[k],
                marker_style=marker_style[k],
                marker_size=marker_size[k],
                marker_face_colour=marker_face_colour[k],
                marker_edge_colour=marker_edge_colour[k],
                marker_edge_width=marker_edge_width[k],
            )

        # Make sure the layout is tight so that the image is of the original size
        f.tight_layout(pad=0)
        # Get the pixels directly from the canvas buffer which is fast
        c_buffer, shape = f.canvas.print_to_buffer()
        # Turn buffer into numpy array and reshape to image
        shape = shape[::-1] + (-1, )
        pixels_buffer = np.frombuffer(c_buffer, dtype=np.uint8).reshape(shape)
        # Prevent matplotlib from rendering
        plt.close(f)
        # We have to apply the alpha channel to get the correct colour
        mask = pixels_buffer[..., 3:] * (1.0 / 255.0)
        pixels = (pixels_buffer[..., :3] * mask).astype(np.uint8)
        return Image.init_from_channels_at_back(pixels)
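
A minimal usage sketch for the rasterizer above (not part of the original source). Although the keyword defaults are scalars, the body indexes every styling argument with [k], so each one must be passed as a sequence with one entry per point cloud; the canvas and point graph below are illustrative.

import numpy as np
from menpo.image import Image
from menpo.shape import bounding_box

# Blank 100x100 RGB canvas and a rectangular point graph to overlay.
canvas = Image.init_from_channels_at_back(np.zeros((100, 100, 3)))
box = bounding_box((20.0, 30.0), (70.0, 60.0))

rasterised = _rasterize_matplotlib(
    canvas, [box],
    render_lines=[True], line_style=["-"], line_colour=["b"], line_width=[1],
    render_markers=[True], marker_style=["o"], marker_size=[3],
    marker_face_colour=["r"], marker_edge_colour=["r"], marker_edge_width=[1],
)
print(rasterised.shape)  # expected to match the input canvas shape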
Example #2
def _attach_predictions(template_name, pixels, coordinates, heatmaps,
                        labeller_fn, show_input_images, show_combined_heatmap,
                        show_individual_heatmaps, index_to_label_fn):
    input_image = Image.init_from_channels_at_back(pixels)
    input_image.landmarks['predictions'] = PointCloud(coordinates)
    labeller(input_image, 'predictions', labeller_fn)
    del input_image.landmarks['predictions']
    images = []

    if show_input_images:
        images.append(input_image)

    if show_combined_heatmap:
        combined_heatmap = np.sum(heatmaps, axis=-1) * 255.0
        combined_heatmap = Image(combined_heatmap)
        combined_heatmap.landmarks[template_name] = input_image.landmarks[
            template_name]
        images.append(combined_heatmap)

    if show_individual_heatmaps:
        for i in range(heatmaps.shape[-1]):
            heatmap = heatmaps[..., i] * 255.0
            heatmap = Image(heatmap)

            if index_to_label_fn is not None:
                label = index_to_label_fn(i)
                #print(label)
                heatmap.landmarks[label] = PointCloud([coordinates[i]])

            images.append(heatmap)

    return images
Example #3
def _visualise_predictions(input_image, heatmaps, coordinates):
    group_sizes = [17, 17]
    group_labels = ['Endocardium', 'Epicardium']

    plt.figure()

    # input image
    menpo_image = Image.init_from_channels_at_back(input_image)
    menpo_image.landmarks['predictions'] = PointCloud(coordinates)
    labeller(menpo_image, 'predictions', left_ventricle_34)
    del menpo_image.landmarks['predictions']
    rasterised_image = menpo_image.rasterize_landmarks(group='lv_34')

    ax_input_image = plt.subplot2grid((7, 14), (0, 0), colspan=6, rowspan=6)
    ax_input_image.imshow(rasterised_image.pixels_with_channels_at_back())

    index = 0
    heatmap_plots = []

    # plot individual predictions
    for i in range(len(group_sizes)):
        for j in range(group_sizes[i]):
            axis = plt.subplot2grid((7, 14), (i, 7 + j))
            axis.imshow(heatmaps[..., index])
            index += 1

            heatmap_plots.append(axis)

    add_group_labels(heatmap_plots, group_labels, group_sizes)
    make_ticklabels_invisible(heatmap_plots)

    plt.show()
Example #4
def serialize_sample(writer, subject_id):
    subject_name = 'P{}'.format(subject_id)

    for i, (video, audio, label) in enumerate(zip(*get_samples(subject_name))):

        frame = Image.init_from_channels_at_back(video)
        lms_path = landmarks_directory / subject_name / "{}.pts".format(i)

        try:
            lms = mio.import_landmark_file(lms_path)
        except Exception as e:
            print('Landmark file [{}] could not be imported'.format(i))
            print('Exception message : {}'.format(e))
            continue

        frame.landmarks['PTS'] = lms
        frame = crop_face(frame)

        example = tf.train.Example(features=tf.train.Features(
            feature={
                'sample_id': _int_feauture(i),
                'subject_id': _int_feauture(subject_id),
                'label': _bytes_feauture(label.tobytes()),
                'raw_audio': _bytes_feauture(audio.tobytes()),
                'frame': _bytes_feauture(get_jpg_string(frame))
            }))

        writer.write(example.SerializeToString())
        del video, audio, label
Example #5
def my_2d_rasterizer(im, fn=None, group=None, f=None, crop=False, message=None):
    """
    Visualisation related function. It accepts a menpo image and renders
    a **single** pair of landmarks in a new image.
    The fn offers the chance to apply a custom function to the image.
    ASSUMPTION: There is no check for the case of no landmarks in the image.
    :param im: menpo image.
    :param fn: (optional) If None, then the default .view_landmarks() is
        used for visualisation, otherwise the provided function.
    :param group: (optional) Used in case fn is None.
    :param f: (optional) Matplotlib figure to use. Leave None, unless
        you know how to modify.
    :param crop: (optional) Crop the resulting visualisation to avoid the
        excessive white boundary. By default False.
    :param message: (optional) If None, nothing is added in the image. If a
        string is passed, then this is annotated (as text) with matplotlib
        utilities, i.e. the exact same text is written in the image.
    :return: menpo rasterised image.
    """
    if fn is None:
        f = plt.figure(frameon=False)
        if group is None:
            # in this case, assume that the first group of landmarks should suffice.
            group = im.landmarks.group_labels[0]
        r = im.view_landmarks(group=group)
    else:
        fn(im)
    if message is not None:
        assert isinstance(message, str)
        st1 = 25 + 90 * crop
        t = plt.annotate(message, xy=(st1, im.shape[0] - 10),
                         size=26, fontweight='bold', color='b')
        # set background transparency
        t.set_bbox(dict(color='w', alpha=0.5, edgecolor='w'))
    # get the image from plt
    f.tight_layout(pad=0)
    # Get the pixels directly from the canvas buffer which is fast
    c_buffer, shape = f.canvas.print_to_buffer()
    # Turn buffer into numpy array and reshape to image
    pixels_buffer = np.frombuffer(c_buffer,
                                  dtype=np.uint8).reshape(shape[::-1] + (-1,))
    # Prevent matplotlib from rendering
    plt.close(f)
    # Ignore the Alpha channel
    im_plt = Image.init_from_channels_at_back(pixels_buffer[..., :3])
    # ensure that they have the same dtype as the original pixels.
    dtype = im.pixels.dtype
    if dtype != np.uint8:
        if dtype == np.float32 or dtype == np.float64:
            im_plt.pixels = im_plt.pixels.astype(dtype)
            im_plt.pixels /= 255.0
        else:
            m1 = 'Unrecognised original dtype ({}).'
            print(m1.format(dtype))
    if crop:
        # position to crop the rasterised image (hardcoded for now).
        cri = (50, 60)
        sh1 = im_plt.shape
        im_plt = im_plt.crop((cri[0], cri[1]), (sh1[0] + cri[0], sh1[1] + cri[1]))
    return im_plt
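
A short usage sketch (illustrative only; module-level imports such as numpy and matplotlib are assumed). Any menpo image with at least one landmark group can be passed; here a synthetic image with a bounding-box group is rasterised with an annotated message.

import numpy as np
from menpo.image import Image
from menpo.shape import bounding_box

im = Image.init_from_channels_at_back(np.zeros((120, 160, 3)))
im.landmarks['box'] = bounding_box((30.0, 40.0), (90.0, 120.0))

# Default fn/group: the first landmark group ('box') is viewed and rasterised.
ras = my_2d_rasterizer(im, message='frame 0')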
Example #6
def _rasterize_matplotlib(image, pclouds, render_lines=True, line_style='-',
                          line_colour='b', line_width=1, render_markers=True,
                          marker_style='o', marker_size=1,
                          marker_face_colour='b', marker_edge_colour='b',
                          marker_edge_width=1):
    import matplotlib.pyplot as plt

    # Convert image shape into 100 DPI inches
    # This makes sure we maintain the original image size
    image_shape = np.array(image.shape)[::-1] / 100.0
    f = plt.figure(figsize=image_shape, frameon=False, dpi=100)

    image.view(figure_id=f.number, figure_size=image_shape)
    for k, p in enumerate(pclouds):
        p.view(figure_id=f.number, render_axes=False, figure_size=image_shape,
               render_lines=render_lines[k], line_style=line_style[k],
               line_colour=line_colour[k], line_width=line_width[k],
               render_markers=render_markers[k], marker_style=marker_style[k],
               marker_size=marker_size[k],
               marker_face_colour=marker_face_colour[k],
               marker_edge_colour=marker_edge_colour[k],
               marker_edge_width=marker_edge_width[k])

    # Make sure the layout is tight so that the image is of the original size
    f.tight_layout(pad=0)
    # Get the pixels directly from the canvas buffer which is fast
    c_buffer, shape = f.canvas.print_to_buffer()
    # Turn buffer into numpy array and reshape to image
    pixels_buffer = np.frombuffer(c_buffer,
                                  dtype=np.uint8).reshape(shape[::-1] + (-1,))
    # Prevent matplotlib from rendering
    plt.close(f)
    # Ignore the Alpha channel
    return Image.init_from_channels_at_back(pixels_buffer[..., :3])
Example #7
def get_jpg_string(im):
    # Gets the serialized jpg from a menpo `Image`.
    if not isinstance(im, Image):
        im = Image.init_from_channels_at_back(im)
    fp = BytesIO()
    mio.export_image(im, fp, extension='jpg')
    fp.seek(0)
    return fp.read()
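
A small usage sketch (not from the original module): any height x width x 3 uint8 array is wrapped into a menpo Image and serialised to JPEG bytes, which is how serialize_sample above stores each frame in a TFRecord.

import numpy as np

# Hypothetical frame: a random 64x64 RGB uint8 array.
frame = (np.random.rand(64, 64, 3) * 255).astype(np.uint8)
jpg_bytes = get_jpg_string(frame)
print(type(jpg_bytes), len(jpg_bytes))  # bytes object holding the encoded JPEG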
Example #8
def _rasterize_pillow(image, pclouds, render_lines=True, line_style='-',
                      line_colour='b', line_width=1, render_markers=True,
                      marker_style='o', marker_size=1, marker_face_colour='b',
                      marker_edge_colour='b', marker_edge_width=1):
    from PIL import ImageDraw

    if any(x != '-' for x in line_style):
        raise ValueError("The Pillow rasterizer only supports the '-' "
                         "line style.")
    if any(x not in {'o', 's'} for x in marker_style):
        raise ValueError("The Pillow rasterizer only supports the 'o' and 's' "
                         "marker styles.")
    if any(x > 1 for x in marker_edge_width):
        raise ValueError('The Pillow rasterizer only supports '
                         'marker_edge_width of 1 or 0.')

    pil_im = image.as_PILImage()
    draw = ImageDraw.Draw(pil_im)

    line_colour = [_parse_colour(x) for x in line_colour]
    marker_edge_colour = [_parse_colour(x) for x in marker_edge_colour]
    marker_face_colour = [_parse_colour(x) for x in marker_face_colour]

    for k in range(len(pclouds)):
        p = pclouds[k]
        if isinstance(p, TriMesh):
            pclouds[k] = p.as_pointgraph()

        points = p.points
        if (render_lines[k] and line_width[k] > 0 and
            hasattr(p, 'edges') and p.edges.size > 0):
            edges = p.edges
            lines = zip(points[edges[:, 0], :],
                        points[edges[:, 1], :])

            for l1, l2 in lines:
                draw.line([tuple(l1[::-1]), tuple(l2[::-1])],
                          fill=line_colour[k], width=line_width[k])

        if render_markers[k] and marker_size[k] > 0:
            draw_func = (draw.ellipse if marker_style[k] == 'o'
                         else draw.rectangle)
            outline = (marker_edge_colour[k] if marker_edge_width[k] == 1
                       else None)
            for p in points:
                y, x = p
                draw_func((x - marker_size[k], y - marker_size[k],
                           x + marker_size[k], y + marker_size[k]),
                          fill=marker_face_colour[k], outline=outline)

    del draw

    pixels = np.asarray(pil_im)
    if image.n_channels == 3:
        return Image.init_from_channels_at_back(pixels)
    else:
        return Image(pixels)
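
A minimal usage sketch for the Pillow rasterizer above (illustrative, and assuming the module-level _parse_colour helper it relies on is available). The same per-pointcloud sequences are required, and only the '-' line style, 'o'/'s' marker styles and edge widths of 0 or 1 are accepted.

import numpy as np
from menpo.image import Image
from menpo.shape import bounding_box

canvas = Image.init_from_channels_at_back(np.zeros((100, 100, 3), dtype=np.uint8))
box = bounding_box((20.0, 30.0), (70.0, 60.0))

rasterised = _rasterize_pillow(
    canvas, [box],
    render_lines=[True], line_style=['-'], line_colour=['b'], line_width=[1],
    render_markers=[True], marker_style=['s'], marker_size=[2],
    marker_face_colour=['r'], marker_edge_colour=['r'], marker_edge_width=[1],
)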
Example #9
def import_image(img_path):
    img = cv2.imread(str(img_path))
    original_image = Image.init_from_channels_at_back(img[:, :, -1::-1])

    try:
        original_image_lms = mio.import_landmark_file('{}/{}.ljson'.format(
            img_path.parent, img_path.stem)).lms.points.astype(np.float32)
        original_image.landmarks['LJSON'] = PointCloud(original_image_lms)
    except Exception:
        # No landmark file alongside the image - return it without landmarks.
        pass

    return original_image
Example #10
def _rasterize_matplotlib(image,
                          pclouds,
                          render_lines=True,
                          line_style='-',
                          line_colour='b',
                          line_width=1,
                          render_markers=True,
                          marker_style='o',
                          marker_size=1,
                          marker_face_colour='b',
                          marker_edge_colour='b',
                          marker_edge_width=1):
    import matplotlib.pyplot as plt

    # Convert image shape into 100 DPI inches
    # This makes sure we maintain the original image size
    image_shape = np.array(image.shape)[::-1] / 100.0
    f = plt.figure(figsize=image_shape, frameon=False, dpi=100)

    image.view(figure_id=f.number, figure_size=image_shape)
    for k, p in enumerate(pclouds):
        p.view(figure_id=f.number,
               render_axes=False,
               figure_size=image_shape,
               render_lines=render_lines[k],
               line_style=line_style[k],
               line_colour=line_colour[k],
               line_width=line_width[k],
               render_markers=render_markers[k],
               marker_style=marker_style[k],
               marker_size=marker_size[k],
               marker_face_colour=marker_face_colour[k],
               marker_edge_colour=marker_edge_colour[k],
               marker_edge_width=marker_edge_width[k])

    # Make sure the layout is tight so that the image is of the original size
    f.tight_layout(pad=0)
    # Get the pixels directly from the canvas buffer which is fast
    c_buffer, shape = f.canvas.print_to_buffer()
    # Turn buffer into numpy array and reshape to image
    pixels_buffer = np.frombuffer(c_buffer,
                                  dtype=np.uint8).reshape(shape[::-1] + (-1, ))
    # Prevent matplotlib from rendering
    plt.close(f)
    # Ignore the Alpha channel
    return Image.init_from_channels_at_back(pixels_buffer[..., :3])
Example #11
def ffmpeg_importer(filepath,
                    normalize=True,
                    exact_frame_count=True,
                    **kwargs):
    r"""
    Imports videos by streaming frames from a pipe using FFMPEG. Returns a
    :map:`LazyList` that gives lazy access to the video on a per-frame basis.

    There are two important environment variables that can be set to alter
    the behaviour of this function:

        ================== ======================================
        ENV Variable       Definition
        ================== ======================================
        MENPO_FFMPEG_CMD   The path to the 'ffmpeg' executable.
        MENPO_FFPROBE_CMD  The path to the 'ffprobe' executable.
        ================== ======================================

    Parameters
    ----------
    filepath : `Path`
        Absolute filepath of the video.
    normalize : `bool`, optional
        If ``True``, normalize between 0.0 and 1.0 and convert to float. If
        ``False`` just return whatever ffmpeg imports.
    exact_frame_count : `bool`, optional
        If ``True``, the import fails if ffprobe is not available
        (reading from ffmpeg's output returns inexact frame count)
    \**kwargs : `dict`, optional
        Any other keyword arguments.

    Returns
    -------
    image : :map:`LazyList`
        A :map:`LazyList` containing :map:`Image` or subclasses per frame
        of the video.
    """
    reader = FFMpegVideoReader(filepath,
                               normalize=normalize,
                               exact_frame_count=exact_frame_count)
    ll = LazyList.init_from_index_callable(
        lambda x: Image.init_from_channels_at_back(reader[x]), len(reader))
    ll.fps = reader.fps

    return ll
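
A short usage sketch (the video path is hypothetical and ffmpeg/ffprobe are assumed to be on the PATH). Because the result is a LazyList, opening the video is cheap and frames are only decoded when indexed.

from pathlib import Path

frames = ffmpeg_importer(Path('/data/clip.mp4'), normalize=True)
print(len(frames), frames.fps)  # frame count and frames-per-second
first = frames[0]               # an Image, decoded on first access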
Example #12
def ffmpeg_importer(filepath, normalize=True, exact_frame_count=True, **kwargs):
    r"""
    Imports videos by streaming frames from a pipe using FFMPEG. Returns a
    :map:`LazyList` that gives lazy access to the video on a per-frame basis.

    There are two important environment variables that can be set to alter
    the behaviour of this function:

        ================== ======================================
        ENV Variable       Definition
        ================== ======================================
        MENPO_FFMPEG_CMD   The path to the 'ffmpeg' executable.
        MENPO_FFPROBE_CMD  The path to the 'ffprobe' executable.
        ================== ======================================

    Parameters
    ----------
    filepath : `Path`
        Absolute filepath of the video.
    normalize : `bool`, optional
        If ``True``, normalize between 0.0 and 1.0 and convert to float. If
        ``False`` just return whatever ffmpeg imports.
    exact_frame_count : `bool`, optional
        If ``True``, the import fails if ffprobe is not available
        (reading from ffmpeg's output returns inexact frame count)
    \**kwargs : `dict`, optional
        Any other keyword arguments.

    Returns
    -------
    image : :map:`LazyList`
        A :map:`LazyList` containing :map:`Image` or subclasses per frame
        of the video.
    """
    reader = FFMpegVideoReader(filepath, normalize=normalize, exact_frame_count=exact_frame_count)
    ll = LazyList.init_from_index_callable(lambda x: Image.init_from_channels_at_back(reader[x]), len(reader))
    ll.fps = reader.fps

    return ll
Example #13
def preprocess(pixels, min_angle=-30, max_angle=30):
    r"""
    Method that applies some pre-processing on the provided image. This involves:

        1. Rotating the image about its centre by a random angle.
        2. Skewing the image by random angles along its two axes.

    The random variables of the two transforms are generated from a uniform
    distribution.

    Parameters
    ----------
    pixels : `array`
        The input image of shape `(height, width, n_channels)`.
    min_angle : `int`, optional
        The minimum angle value of the uniform distribution.
    max_angle : `int`, optional
        The maximum angle value of the uniform distribution.

    Returns
    -------
    transformed_image : `array`
        The transformed image of shape `(height, width, n_channels)`.
    """
    # Create menpo image
    image = Image.init_from_channels_at_back(pixels)

    # Rotation
    theta = np.random.uniform(low=min_angle, high=max_angle)
    image = image.rotate_ccw_about_centre(theta, retain_shape=True)

    # Skew
    angles = np.random.uniform(low=min_angle, high=max_angle, size=2)
    image = skew_image(image, angles[0], angles[1])

    return image.pixels_with_channels_at_back()[..., None].astype(np.float32)
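
A small usage sketch with synthetic data (assuming the skew_image helper referenced above is importable). Each call draws a fresh rotation angle and skew, so repeated calls on the same input produce different augmentations.

import numpy as np

pixels = np.random.rand(64, 64, 1).astype(np.float32)  # synthetic single-channel image
augmented = preprocess(pixels, min_angle=-15, max_angle=15)
print(augmented.shape, augmented.dtype)  # e.g. (64, 64, 1) float32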
Example #14
def main():

    test_data_directory = './test_data_face/render'

    # load obj
    face_mesh = m3io.import_mesh('./test_data_face/mesh.obj')

    texture_index = (face_mesh.tcoords.points[:, ::-1] *
                     face_mesh.texture.shape).astype(np.int32)

    vertex_color = face_mesh.texture.pixels[:, 1 - texture_index[:, 0],
                                            texture_index[:, 1]].T

    tf.reset_default_graph()
    # Set up a basic cube centered at the origin, with vertex normals pointing
    # outwards along the line from the origin to the cube vertices:
    face_vertices = tf.constant(face_mesh.points, dtype=tf.float32)
    face_normals = tf.nn.l2_normalize(face_vertices, dim=1)
    face_triangles = tf.constant(face_mesh.trilist, dtype=tf.int32)

    # testRendersSimpleCube: renders a simple cube to test the full forward
    # pass. Verifies the functionality of both the custom kernel and the
    # python wrapper.

    n_randering = 16

    model_transforms = camera_utils.euler_matrices(
        tf.random_uniform([n_randering, 3]) * np.pi / 2 -
        np.pi / 4.)[:, :3, :3]

    vertices_world_space = tf.matmul(tf.stack(
        [face_vertices for _ in range(n_randering)]),
                                     model_transforms,
                                     transpose_b=True)

    normals_world_space = tf.matmul(tf.stack(
        [face_normals for _ in range(n_randering)]),
                                    model_transforms,
                                    transpose_b=True)

    # camera position:
    eye = tf.constant(n_randering * [[0.0, 0.0, 6.0]], dtype=tf.float32)
    center = tf.constant(n_randering * [[0.0, 0.0, 0.0]], dtype=tf.float32)
    world_up = tf.constant(n_randering * [[0.0, 1.0, 0.0]], dtype=tf.float32)
    ambient_colors = tf.constant(n_randering * [[0.2, 0.2, 0.2]],
                                 dtype=tf.float32)
    image_width = 256
    image_height = 256
    light_positions = tf.constant(n_randering *
                                  [[[6.0, 6.0, 6.0], [-6.0, -6.0, 6.0]]])
    light_intensities = tf.ones([n_randering, 1, 3], dtype=tf.float32)
    vertex_diffuse_colors = tf.constant(np.stack(
        [vertex_color for _ in range(n_randering)]),
                                        dtype=tf.float32)

    rendered = mesh_renderer.mesh_renderer(
        vertices_world_space,
        triangles=face_triangles,
        normals=normals_world_space,
        diffuse_colors=vertex_diffuse_colors,
        camera_position=eye,
        camera_lookat=center,
        camera_up=world_up,
        light_positions=light_positions,
        light_intensities=light_intensities,
        image_width=image_width,
        image_height=image_height,
        ambient_color=ambient_colors)

    image_id = 0
    with tf.Session() as sess:
        fps_list = []
        while (image_id < 100):
            start_time = time.time()
            images = sess.run(rendered, feed_dict={})
            for image in images:
                target_image_name = 'Gray_face_%i.png' % image_id
                image_id += 1
                baseline_image_path = os.path.join(test_data_directory,
                                                   target_image_name)

                mio.export_image(Image.init_from_channels_at_back(
                    image[..., :3].clip(0, 1)),
                                 baseline_image_path,
                                 overwrite=True)

            end_time = time.time()
            fps = n_randering / (end_time - start_time)
            fps_list.append(fps)
            if len(fps_list) > 5:
                fps_list.pop(0)
            print(np.mean(fps_list))
Example #15
def _rasterize_pillow(
    image,
    pclouds,
    render_lines=True,
    line_style="-",
    line_colour="b",
    line_width=1,
    render_markers=True,
    marker_style="o",
    marker_size=1,
    marker_face_colour="b",
    marker_edge_colour="b",
    marker_edge_width=1,
):
    from PIL import ImageDraw

    if image.n_channels not in {1, 3}:
        raise ValueError("The Pillow rasterizer only supports grayscale or "
                         "colour images")
    if any(x != "-" for x in line_style):
        raise ValueError("The Pillow rasterizer only supports the '-' "
                         "line style.")
    if any(x not in {"o", "s"} for x in marker_style):
        raise ValueError("The Pillow rasterizer only supports the 'o' and 's' "
                         "marker styles.")
    if any(x > 1 for x in marker_edge_width):
        raise ValueError("The Pillow rasterizer only supports "
                         "marker_edge_width of 1 or 0.")

    if image.n_channels == 1:
        # Make the image RGB
        image = image.extract_channels([0, 0, 0])

    pil_im = image.as_PILImage()
    draw = ImageDraw.Draw(pil_im)

    line_colour = [_parse_colour(x) for x in line_colour]
    marker_edge_colour = [_parse_colour(x) for x in marker_edge_colour]
    marker_face_colour = [_parse_colour(x) for x in marker_face_colour]

    for k in range(len(pclouds)):
        p = pclouds[k]
        if isinstance(p, TriMesh):
            pclouds[k] = p.as_pointgraph()

        points = p.points
        if (render_lines[k] and line_width[k] > 0 and hasattr(p, "edges")
                and p.edges.size > 0):
            edges = p.edges
            lines = zip(points[edges[:, 0], :], points[edges[:, 1], :])

            for l1, l2 in lines:
                draw.line(
                    [tuple(l1[::-1]), tuple(l2[::-1])],
                    fill=line_colour[k],
                    width=line_width[k],
                )

        if render_markers[k] and marker_size[k] > 0:
            draw_func = draw.ellipse if marker_style[k] == "o" else draw.rectangle
            outline = marker_edge_colour[k] if marker_edge_width[k] == 1 else None
            for p in points:
                y, x = p
                draw_func(
                    (
                        x - marker_size[k],
                        y - marker_size[k],
                        x + marker_size[k],
                        y + marker_size[k],
                    ),
                    fill=marker_face_colour[k],
                    outline=outline,
                )

    del draw

    pixels = np.asarray(pil_im)
    if image.n_channels == 3:
        return Image.init_from_channels_at_back(pixels)
    else:
        return Image(pixels)
Example #16
def test_init_from_channels_at_back_less_dimensions():
    p = np.empty([50, 60])
    im = Image.init_from_channels_at_back(p)
    assert im.n_channels == 1
    assert im.height == 50
    assert im.width == 60
Example #17
def test_init_from_rolled_channels():
    p = np.empty([50, 60, 3])
    im = Image.init_from_channels_at_back(p)
    assert im.n_channels == 3
    assert im.height == 50
    assert im.width == 60
Example #18
def pillow_importer(filepath, asset=None, normalize=True, **kwargs):
    r"""
    Imports an image using PIL/pillow.

    Different image modes cause different importing strategies.

    RGB, L, I:
        Imported as either `float` or `uint8` depending on normalisation flag.
    RGBA:
        Imported as :map:`MaskedImage` if normalize is ``True`` else imported
        as a 4 channel `uint8` image.
    1:
        Imported as a :map:`BooleanImage`. Normalisation is ignored.
    F:
        Imported as a floating point image. Normalisation is ignored.

    Parameters
    ----------
    filepath : `Path`
        Absolute filepath of image
    asset : `object`, optional
        An optional asset that may help with loading. This is unused for this
        implementation.
    normalize : `bool`, optional
        If ``True``, normalize between 0.0 and 1.0 and convert to float. If
        ``False`` just pass whatever PIL imports back (according
        to the type rules outlined in the constructor).
    \**kwargs : `dict`, optional
        Any other keyword arguments.

    Returns
    -------
    image : :map:`Image` or subclass
        The imported image.
    """
    import PIL.Image as PILImage
    if isinstance(filepath, Path):
        filepath = str(filepath)
    pil_image = PILImage.open(filepath)
    mode = pil_image.mode
    if mode == 'RGBA':
        # If normalize is False, then we return the alpha as an extra
        # channel, which can be useful if the alpha channel has semantic
        # meanings!
        if normalize:
            alpha = np.array(pil_image)[..., 3].astype(bool)
            image_pixels = _pil_to_numpy(pil_image, True, convert='RGB')
            image = MaskedImage.init_from_channels_at_back(image_pixels,
                                                           mask=alpha)
        else:
            # With no normalisation we just return the pixels
            image = Image.init_from_channels_at_back(
                _pil_to_numpy(pil_image, False))
    elif mode in ['L', 'I', 'RGB']:
        # Greyscale, Integer and RGB images
        image = Image.init_from_channels_at_back(
            _pil_to_numpy(pil_image, normalize))
    elif mode == '1':
        # Convert to 'L' type (http://stackoverflow.com/a/4114122/1716869).
        # Can't normalize a binary image
        image = BooleanImage(_pil_to_numpy(pil_image, False, convert='L'),
                             copy=True)
    elif mode == 'P':
        # Convert palette images to RGB
        image = Image.init_from_channels_at_back(
            _pil_to_numpy(pil_image, normalize, convert='RGB'))
    elif mode == 'F':  # Floating point images
        # Don't normalize as we don't know the scale
        image = Image.init_from_channels_at_back(
            _pil_to_numpy(pil_image, False))
    else:
        raise ValueError('Unexpected mode for PIL: {}'.format(mode))
    return image
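
A usage sketch (the file path is hypothetical). The importer dispatches on the PIL mode, so an ordinary 8-bit RGB JPEG is expected to come back as a 3-channel float Image in [0, 1] when normalize is True.

from pathlib import Path

image = pillow_importer(Path('/data/face.jpg'), normalize=True)
print(type(image).__name__, image.n_channels, image.shape)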
Example #19
def _rasterize_pillow(image,
                      pclouds,
                      render_lines=True,
                      line_style='-',
                      line_colour='b',
                      line_width=1,
                      render_markers=True,
                      marker_style='o',
                      marker_size=1,
                      marker_face_colour='b',
                      marker_edge_colour='b',
                      marker_edge_width=1):
    from PIL import ImageDraw

    if any(x != '-' for x in line_style):
        raise ValueError("The Pillow rasterizer only supports the '-' "
                         "line style.")
    if any(x not in {'o', 's'} for x in marker_style):
        raise ValueError("The Pillow rasterizer only supports the 'o' and 's' "
                         "marker styles.")
    if any(x > 1 for x in marker_edge_width):
        raise ValueError('The Pillow rasterizer only supports '
                         'marker_edge_width of 1 or 0.')

    pil_im = image.as_PILImage()
    draw = ImageDraw.Draw(pil_im)

    line_colour = [_parse_colour(x) for x in line_colour]
    marker_edge_colour = [_parse_colour(x) for x in marker_edge_colour]
    marker_face_colour = [_parse_colour(x) for x in marker_face_colour]

    for k in range(len(pclouds)):
        p = pclouds[k]
        if isinstance(p, TriMesh):
            pclouds[k] = p.as_pointgraph()

        points = p.points
        if (render_lines[k] and line_width[k] > 0 and hasattr(p, 'edges')
                and p.edges.size > 0):
            edges = p.edges
            lines = zip(points[edges[:, 0], :], points[edges[:, 1], :])

            for l1, l2 in lines:
                draw.line([tuple(l1[::-1]), tuple(l2[::-1])],
                          fill=line_colour[k],
                          width=line_width[k])

        if render_markers[k] and marker_size[k] > 0:
            draw_func = (draw.ellipse
                         if marker_style[k] == 'o' else draw.rectangle)
            outline = (marker_edge_colour[k]
                       if marker_edge_width[k] == 1 else None)
            for p in points:
                y, x = p
                draw_func((x - marker_size[k], y - marker_size[k],
                           x + marker_size[k], y + marker_size[k]),
                          fill=marker_face_colour[k],
                          outline=outline)

    del draw

    pixels = np.asarray(pil_im)
    if image.n_channels == 3:
        return Image.init_from_channels_at_back(pixels)
    else:
        return Image(pixels)
Example #20
def pillow_importer(filepath, asset=None, normalize=True, **kwargs):
    r"""
    Imports an image using PIL/pillow.

    Different image modes cause different importing strategies.

    RGB, L, I:
        Imported as either `float` or `uint8` depending on normalisation flag.
    RGBA:
        Imported as :map:`MaskedImage` if normalize is ``True`` else imported
        as a 4 channel `uint8` image.
    1:
        Imported as a :map:`BooleanImage`. Normalisation is ignored.
    F:
        Imported as a floating point image. Normalisation is ignored.

    Parameters
    ----------
    filepath : `Path`
        Absolute filepath of image
    asset : `object`, optional
        An optional asset that may help with loading. This is unused for this
        implementation.
    normalize : `bool`, optional
        If ``True``, normalize between 0.0 and 1.0 and convert to float. If
        ``False`` just pass whatever PIL imports back (according
        to the type rules outlined in the constructor).
    \**kwargs : `dict`, optional
        Any other keyword arguments.

    Returns
    -------
    image : :map:`Image` or subclass
        The imported image.
    """
    import PIL.Image as PILImage

    if isinstance(filepath, Path):
        filepath = str(filepath)
    pil_image = PILImage.open(filepath)
    mode = pil_image.mode
    if mode == "RGBA":
        # If normalize is False, then we return the alpha as an extra
        # channel, which can be useful if the alpha channel has semantic
        # meanings!
        if normalize:
            alpha = np.array(pil_image)[..., 3].astype(bool)
            image_pixels = _pil_to_numpy(pil_image, True, convert="RGB")
            image = MaskedImage.init_from_channels_at_back(image_pixels,
                                                           mask=alpha)
        else:
            # With no normalisation we just return the pixels
            image = Image.init_from_channels_at_back(
                _pil_to_numpy(pil_image, False))
    elif mode in ["L", "I", "RGB"]:
        # Greyscale, Integer and RGB images
        image = Image.init_from_channels_at_back(
            _pil_to_numpy(pil_image, normalize))
    elif mode == "1":
        # Convert to 'L' type (http://stackoverflow.com/a/4114122/1716869).
        # Can't normalize a binary image
        image = BooleanImage(_pil_to_numpy(pil_image, False, convert="L"),
                             copy=True)
    elif mode == "P":
        # Convert palette images to RGB
        image = Image.init_from_channels_at_back(
            _pil_to_numpy(pil_image, normalize, convert="RGB"))
    elif mode == "F":  # Floating point images
        # Don't normalize as we don't know the scale
        image = Image.init_from_channels_at_back(
            _pil_to_numpy(pil_image, False))
    else:
        raise ValueError("Unexpected mode for PIL: {}".format(mode))
    return image