Example 1
def test_as_pil_image_bool():
    im = BooleanImage(np.ones((120, 120), dtype=np.bool), copy=False)
    new_im = im.as_PILImage()
    assert 1
    assert_allclose(
        np.asarray(new_im.getdata()).reshape(im.pixels.shape),
        im.pixels.astype(np.uint8) * 255)
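As a minimal standalone sketch of the same behaviour (using only the menpo API exercised by the test above), an all-True BooleanImage exports to an 8-bit PIL image with True mapped to 255 and False to 0:

import numpy as np
from menpo.image import BooleanImage

# Build an all-True mask and export it through PIL (True -> 255, False -> 0).
mask = BooleanImage(np.ones((64, 64), dtype=bool))
pil_im = mask.as_PILImage()
print(np.asarray(pil_im).max())  # 255 for an all-True mask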
Example 2
def skew_image(image, theta, phi):
    r"""
    Method that skews the provided image. Note that the output image has the
    same size (shape) as the input.

    Parameters
    ----------
    image : `menpo.image.Image`
        The image to distort.
    theta : `float`
        The skew angle over the x axis (tan(theta)).
    phi : `float`
        The skew angle over the y axis (tan(phi)).

    Returns
    -------
    skewed_image : `menpo.image.Image`
        The skewed (distorted) image.
    """
    # Get mask of pixels
    mask = BooleanImage(image.pixels[0])
    # Create the bounding box (pointcloud) of the mask
    bbox = bounding_box(*mask.bounds_true())
    # Skew the bounding box
    new_bbox = skew_shape(bbox, theta, phi)
    # Warp the image using TPS
    pwa = ThinPlateSplines(new_bbox, bbox)

    return image.warp_to_shape(image.shape, pwa)
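A hypothetical call site for the function above (the asset name and angle values are placeholders; skew_image itself relies on the bounding_box, skew_shape and ThinPlateSplines helpers imported in its own module):

import menpo.io as mio

# Load one of menpo's builtin assets and skew it slightly over the x axis.
img = mio.import_builtin_asset('lenna.png')
skewed = skew_image(img, theta=0.15, phi=0.0)
print(skewed.shape == img.shape)  # True: the output keeps the input shape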
Example 3
def test_mask_creation_basics():
    mask_shape = (120, 121, 3)
    mask_region = np.ones(mask_shape)
    mask = BooleanImage(mask_region)
    assert_equal(mask.n_channels, 1)
    assert_equal(mask.n_dims, 3)
    assert_equal(mask.shape, mask_shape)
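The test above highlights a detail worth remembering: BooleanImage has no channel axis, so every dimension of the input array is treated as a spatial dimension. A minimal sketch of the same behaviour:

import numpy as np
from menpo.image import BooleanImage

# A (120, 121, 3) boolean array becomes a 3D (volumetric) mask,
# not a 3-channel 2D mask.
vol_mask = BooleanImage(np.ones((120, 121, 3), dtype=bool))
print(vol_mask.n_dims, vol_mask.n_channels, vol_mask.shape)  # 3 1 (120, 121, 3)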
Example 4
 def build(self):
     r"""
     Read the image using PIL and then use the :map:`Image` constructor to
     create an instance.
     """
     self._pil_image = PILImage.open(self.filepath)
     mode = self._pil_image.mode
     if mode == 'RGBA':
         # RGB with Alpha Channel
         # If we normalise it then we convert to floating point
         # and use the alpha channel as the mask
         if self.normalise:
             alpha = np.array(self._pil_image)[..., 3].astype(np.bool)
             image_pixels = self._pil_to_numpy(True, convert='RGB')
             image = MaskedImage(image_pixels, mask=alpha)
         else:
             # With no normalisation we just return the pixels
             image = Image(self._pil_to_numpy(False))
     elif mode in ['L', 'I', 'RGB']:
         # Greyscale, Integer and RGB images
         image = Image(self._pil_to_numpy(self.normalise))
     elif mode == '1':
         # Can't normalise a binary image
         image = BooleanImage(self._pil_to_numpy(False))
     elif mode == 'P':
         # Convert palette images to RGB
         image = Image(self._pil_to_numpy(self.normalise, convert='RGB'))
     elif mode == 'F':  # Floating point images
         # Don't normalise as we don't know the scale
         image = Image(self._pil_to_numpy(False))
     else:
         raise ValueError('Unexpected mode for PIL: {}'.format(mode))
     return image
Example 5
    def build(self):
        r"""
        Read the image using PIL and then use the :map:`Image` constructor to
        create an instance.
        """
        import PIL.Image as PILImage

        self._pil_image = PILImage.open(self.filepath)
        mode = self._pil_image.mode
        if mode == 'RGBA':
            # If normalise is False, then we return the alpha as an extra
            # channel, which can be useful if the alpha channel has semantic
            # meanings!
            if self.normalise:
                alpha = np.array(self._pil_image)[..., 3].astype(np.bool)
                image_pixels = self._pil_to_numpy(True, convert='RGB')
                image = MaskedImage(image_pixels, mask=alpha, copy=False)
            else:
                # With no normalisation we just return the pixels
                image = Image(self._pil_to_numpy(False), copy=False)
        elif mode in ['L', 'I', 'RGB']:
            # Greyscale, Integer and RGB images
            image = Image(self._pil_to_numpy(self.normalise), copy=False)
        elif mode == '1':
            # Can't normalise a binary image
            image = BooleanImage(self._pil_to_numpy(False), copy=False)
        elif mode == 'P':
            # Convert palette images to RGB
            image = Image(self._pil_to_numpy(self.normalise, convert='RGB'))
        elif mode == 'F':  # Floating point images
            # Don't normalise as we don't know the scale
            image = Image(self._pil_to_numpy(False), copy=False)
        else:
            raise ValueError('Unexpected mode for PIL: {}'.format(mode))
        return image
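For quick reference, the mode dispatch implemented by the two build() methods above can be summarised as follows (a descriptive sketch of the code, not additional importer logic):

# PIL mode -> menpo type produced by build():
#   'RGBA'           -> MaskedImage (alpha becomes the mask) when normalising,
#                       otherwise a plain 4-channel Image
#   'L', 'I', 'RGB'  -> Image (normalised or not, per the flag)
#   '1'              -> BooleanImage (normalisation is never applied)
#   'P'              -> Image, converted to RGB first
#   'F'              -> Image, never normalised (the scale is unknown)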
Example 6
def test_create_MaskedImage_copy_true_mask_BooleanImage():
    pixels = np.ones((100, 100, 1))
    mask = np.ones((100, 100), dtype=np.bool)
    mask_image = BooleanImage(mask, copy=False)
    image = MaskedImage(pixels, mask=mask_image, copy=True)
    assert (not is_same_array(image.pixels, pixels))
    assert (not is_same_array(image.mask.pixels, mask))
Example 7
def test_booleanimage_copy():
    pixels = np.ones([10, 10], dtype=np.bool)
    landmarks = PointCloud(np.ones([3, 2]), copy=False)
    im = BooleanImage(pixels, copy=False)
    im.landmarks['test'] = landmarks
    im_copy = im.copy()

    assert (not is_same_array(im.pixels, im_copy.pixels))
    assert (not is_same_array(im_copy.landmarks['test'].points,
                              im.landmarks['test'].points))
Example 8
def sample_mask_for_centres(mask, centres):
    r"""
    Sample a mask at the centres

    Parameters
    ----------
    mask :  Either MaskedImage or Image class.
        The target image object that includes the windows_centres.

    window_centres : ndarray, optional
        If set, use these window centres to rescale the landmarks
        appropriately. If None, no scaling is applied.

    """
    return BooleanImage(mask[centres[..., 0], centres[..., 1]], copy=False)
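A standalone usage sketch of the function defined above (the mask and centres below are synthetic; centres is assumed to hold (row, column) indices in its last axis, which is how the function indexes it):

import numpy as np

# Sample a 100x100 mask on a coarse 10x10 grid of centres.
mask = np.zeros((100, 100), dtype=bool)
mask[30:70, 30:70] = True
rows, cols = np.meshgrid(np.arange(5, 100, 10), np.arange(5, 100, 10),
                         indexing='ij')
centres = np.stack([rows, cols], axis=-1)          # shape (10, 10, 2)
sampled = sample_mask_for_centres(mask, centres)   # BooleanImage of shape (10, 10)
print(sampled.shape, sampled.n_true())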
Example 9
def rasterize_barycentric_coordinate_images(mesh, image_shape):
    h, w = image_shape
    yx, bcoords, tri_indices = rasterize_barycentric_coordinates(
        mesh, image_shape)

    tri_indices_img = np.zeros((1, h, w), dtype=int)
    bcoords_img = np.zeros((3, h, w))
    mask = np.zeros((h, w), dtype=np.bool)
    mask[yx[:, 0], yx[:, 1]] = True
    tri_indices_img[:, yx[:, 0], yx[:, 1]] = tri_indices
    bcoords_img[:, yx[:, 0], yx[:, 1]] = bcoords.T

    mask = BooleanImage(mask)
    return (MaskedImage(bcoords_img, mask=mask.copy(), copy=False),
            MaskedImage(tri_indices_img, mask=mask.copy(), copy=False))
Example 10
def holistic_sampling_from_scale(aam, scale=0.35):
    reference = aam.appearance_models[0].mean()
    scaled_reference = reference.rescale(scale)

    t = AlignmentUniformScale(scaled_reference.landmarks['source'].lms,
                              reference.landmarks['source'].lms)
    new_indices = np.require(np.round(
        t.apply(scaled_reference.mask.true_indices())),
                             dtype=np.int)

    modified_mask = deepcopy(reference.mask.pixels)
    modified_mask[:] = False
    modified_mask[:, new_indices[:, 0], new_indices[:, 1]] = True

    true_positions = np.nonzero(modified_mask[:,
                                              reference.mask.mask].ravel())[0]

    return true_positions, BooleanImage(modified_mask[0])
Example 11
File: base.py Project: yymath/menpo
    def transfer_mask(self, target_image, window_centres=None):
        r"""
        Transfers its own mask to the target_image object after
        appropriately correcting it. The mask correction is achieved based on
        the windows_centres of the features object.

        Parameters
        ----------
        target_image :  Either MaskedImage or Image class.
            The target image object that includes the windows_centres.

        window_centres : ndarray, optional
            If set, use these window centres to rescale the landmarks
            appropriately. If None, no scaling is applied.
        """
        from menpo.image import BooleanImage
        mask = self._image.mask.mask  # don't want a channel axis!
        if window_centres is not None:
            mask = mask[window_centres[..., 0], window_centres[..., 1]]
        target_image.mask = BooleanImage(mask.copy())
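The heart of transfer_mask as a standalone sketch: mask.mask yields the boolean array without a channel axis, which (optionally subsampled at the window centres) is wrapped in a fresh BooleanImage and assigned to the target image. The shapes and the channels-first pixel layout below are assumptions for illustration:

import numpy as np
from menpo.image import BooleanImage, MaskedImage

target = MaskedImage(np.random.rand(1, 40, 40))  # mask defaults to all True

mask_2d = np.zeros((40, 40), dtype=bool)
mask_2d[10:30, 10:30] = True
# With window centres, the mask would first be subsampled exactly as above:
# mask_2d = mask_2d[window_centres[..., 0], window_centres[..., 1]]
target.mask = BooleanImage(mask_2d.copy())
print(target.mask.n_true())  # 400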
Example 12
def holistic_sampling_from_scale(aam, scale=0.35):
    r"""
    Function that generates a sampling reference mask given a scale value.

    Parameters
    ----------
    aam : :map:`AAM` or subclass
        The trained AAM.
    scale : `float`, optional
        The scale value.

    Returns
    -------
    true_positions : `ndarray` of `bool`
        The array that has ``True`` for the points of the reference shape that
        belong to the new mask.
    boolean_image : `menpo.image.BooleanImage`
        The boolean image of the mask.
    """
    reference = aam.appearance_models[0].mean()
    scaled_reference = reference.rescale(scale)

    t = AlignmentUniformScale(scaled_reference.landmarks['source'],
                              reference.landmarks['source'])
    new_indices = np.require(np.round(
        t.apply(scaled_reference.mask.true_indices())),
                             dtype=np.int)

    modified_mask = deepcopy(reference.mask.pixels)
    modified_mask[:] = False
    modified_mask[:, new_indices[:, 0], new_indices[:, 1]] = True

    true_positions = np.nonzero(modified_mask[:,
                                              reference.mask.mask].ravel())[0]

    return true_positions, BooleanImage(modified_mask[0])
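The masking trick at the centre of both versions of this function, as a standalone sketch: start from an all-False copy of an existing (channel-first) mask array, switch on a sparse set of (row, column) sites, and record where those sites fall within the original mask's True region. The arrays below are synthetic:

import numpy as np
from menpo.image import BooleanImage

original_mask = np.zeros((1, 50, 50), dtype=bool)  # same layout as reference.mask.pixels
original_mask[:, 10:40, 10:40] = True

new_indices = np.array([[15, 15], [20, 30], [35, 35]])
modified_mask = original_mask.copy()
modified_mask[:] = False
modified_mask[:, new_indices[:, 0], new_indices[:, 1]] = True

# positions of the sampled sites within the original True region
true_positions = np.nonzero(modified_mask[:, original_mask[0]].ravel())[0]
sampling_mask = BooleanImage(modified_mask[0])
print(len(true_positions), sampling_mask.n_true())  # 3 3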
Example 13
def rescale_images_to_reference_shape(images,
                                      group,
                                      reference_shape,
                                      tight_mask=True,
                                      sd=svs_shape,
                                      target_group=None,
                                      verbose=False):
    r"""
    """
    _has_lms_align = False
    _n_align_points = None
    _is_mc = False
    group_align = group
    _db_path = images[0].path.parent
    reference_align_shape = reference_shape
    n_landmarks = reference_shape.n_points
    # Normalize the scaling of all images wrt the reference_shape size
    for i in images:
        if 'LMS' in i.landmarks.keys():
            _has_lms_align = True
            i.landmarks['align'] = i.landmarks['LMS']
            if not _n_align_points:
                _n_align_points = i.landmarks['align'].lms.n_points

    if _has_lms_align:
        group_align = 'align'
        reference_align_shape = PointCloud(
            reference_shape.points[:_n_align_points])
        reference_shape = PointCloud(reference_shape.points[_n_align_points:])
    else:
        group_align = '_nicp'
        for i in images:
            source_shape = TriMesh(reference_shape.points)
            _, points_corr = nicp(source_shape, i.landmarks[group].lms)
            i.landmarks[group_align] = PointCloud(
                i.landmarks[group].lms.points[points_corr])

    print('  - Normalising')
    normalized_images = [
        i.rescale_to_pointcloud(reference_align_shape, group=group_align)
        for i in images
    ]

    # Global Parameters
    alpha = 30
    pdm = 0
    lms_shapes = [i.landmarks[group_align].lms for i in normalized_images]
    shapes = [i.landmarks[group].lms for i in normalized_images]
    n_shapes = len(shapes)

    # Align Shapes Using ICP
    aligned_shapes, target_shape, _removed_transform, _icp_transform, _icp\
        = align_shapes(shapes, reference_shape, lms_shapes=lms_shapes, align_target=reference_align_shape)
    # Build Reference Frame from Aligned Shapes
    bound_list = []
    for s in [reference_shape] + aligned_shapes.tolist():
        bmin, bmax = s.bounds()
        bound_list.append(bmin)
        bound_list.append(bmax)
        bound_list.append(np.array([bmin[0], bmax[1]]))
        bound_list.append(np.array([bmax[0], bmin[1]]))
    bound_list = PointCloud(np.array(bound_list))

    scales = np.max(bound_list.points, axis=0) - np.min(bound_list.points,
                                                        axis=0)
    max_scale = np.max(scales)
    bound_list = PointCloud(
        np.array([[max_scale, max_scale], [max_scale, 0], [0, max_scale],
                  [0, 0]]))

    reference_frame = build_reference_frame(bound_list, boundary=15)

    # Translation between the reference shape and the aligned shapes
    align_centre = target_shape.centre_of_bounds()
    align_t = Translation(reference_frame.centre() - align_centre)

    _rf_align = Translation(align_centre - reference_frame.centre())

    # Set All True Pixels for Mask
    reference_frame.mask.pixels = np.ones(reference_frame.mask.pixels.shape,
                                          dtype=np.bool)

    # Create Cache Directory
    home_dir = os.getcwd()
    dir_hex = uuid.uuid1()

    sd_path_in = '{}/shape_discriptor'.format(
        _db_path) if _db_path else '{}/.cache/{}/sd_training'.format(
            home_dir, dir_hex)
    sd_path_out = sd_path_in

    matE = MatlabExecuter()
    mat_code_path = '/vol/atlas/homes/yz4009/gitdev/mfsfdev'

    # Skip building svs if the path is specified
    _build_shape_desc(sd_path_in,
                      normalized_images,
                      reference_shape,
                      aligned_shapes,
                      align_t,
                      reference_frame,
                      _icp_transform,
                      _is_mc=_is_mc,
                      group=group,
                      target_align_shape=reference_align_shape,
                      _shape_desc=sd,
                      align_group=group_align,
                      target_group=target_group)

    # self._build_trajectory_basis(sample_groups, target_shape,
    #     aligned_shapes, dense_reference_shape, align_t)

    # Call Matlab to Build Flows
    if not isfile('{}/result.mat'.format(sd_path_in)):
        print('  - Building Shape Flow')
        matE.cd(mat_code_path)
        ext = 'gif'
        isLms = _has_lms_align + 0
        isBasis = 0
        fstr =  'gpuDevice(1);' \
                'addpath(\'{0}/{1}\');' \
                'addpath(\'{0}/{2}\');' \
                'build_flow(\'{3}\', \'{4}\', \'{5}\', {6}, {7}, ' \
                '{8}, \'{3}/{9}\', {10}, {11}, {14}, {15}, {12}, \'{13}\')'.format(
                    mat_code_path, 'cudafiles', 'tools',
                    sd_path_in, sd_path_out, 'sd_%04d.{}'.format(ext),
                    0,
                    1, n_shapes, 'bas.mat',
                    alpha, pdm, 30, 'sd_%04d_lms.pts', isBasis, isLms
               )
        sys.stderr.write(fstr)
        sys.stderr.write(fstr.replace('build_flow', 'build_flow_test'))
        p = matE.run_function(fstr)
        p.wait()
    else:
        sd_path_out = sd_path_in

    # Retrieve Results
    mat = sio.loadmat('{}/result.mat'.format(sd_path_out))

    _u, _v = mat['u'], mat['v']

    # Build Transforms
    print("  - Build Transform")
    transforms = []
    for i in range(n_shapes):
        transforms.append(OpticalFlowTransform(_u[:, :, i], _v[:, :, i]))

    # build dense shapes
    print("  - Build Dense Shapes")

    testing_points = reference_frame.mask.true_indices()
    ref_sparse_lms = align_t.apply(reference_shape)
    close_mask = BooleanImage(
        matpath(ref_sparse_lms.points).contains_points(testing_points).reshape(
            reference_frame.mask.mask.shape))

    if tight_mask:
        reference_frame.mask = close_mask
    else:
        reference_frame.landmarks['sparse'] = ref_sparse_lms
        reference_frame.constrain_mask_to_landmarks(group='sparse')

    # Get Dense Shape from Masked Image
    dense_reference_shape = PointCloud(
        np.vstack((align_t.apply(reference_align_shape).points,
                   align_t.apply(reference_shape).points,
                   reference_frame.mask.true_indices())))

    # Set Dense Shape as Reference Landmarks
    reference_frame.landmarks['source'] = dense_reference_shape
    dense_shapes = []
    for i, t in enumerate(transforms):
        warped_points = t.apply(dense_reference_shape)
        dense_shape = warped_points
        dense_shapes.append(dense_shape)

    ni = []
    for i, ds, t in zip(normalized_images, dense_shapes, _removed_transform):
        img = i.warp_to_shape(reference_frame.shape,
                              _rf_align.compose_before(t),
                              warp_landmarks=True)
        img.landmarks[group] = ds
        ni.append(img)

    return ni, transforms, reference_frame, n_landmarks, _n_align_points, _removed_transform, normalized_images, _rf_align, reference_shape, [
        align_t
        # _rf_align, _removed_transform, aligned_shapes, target_shape,
        # reference_frame, dense_reference_shape, testing_points,
        # align_t, normalized_images, shapes, lms_shapes,
        # reference_shape, reference_align_shape
    ]
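The close_mask construction near the end is a generally useful pattern on its own: rasterise a polygon into a boolean mask by testing which pixel indices fall inside it. A standalone sketch, assuming matpath above is matplotlib.path.Path (as its usage suggests):

import numpy as np
from matplotlib.path import Path as matpath
from menpo.image import BooleanImage

h, w = 60, 80
polygon = np.array([[10, 10], [10, 70], [50, 70], [50, 10]])  # (row, col) vertices
rows, cols = np.mgrid[0:h, 0:w]
pixel_indices = np.stack([rows.ravel(), cols.ravel()], axis=-1)
inside = matpath(polygon).contains_points(pixel_indices).reshape(h, w)
close_mask = BooleanImage(inside)
print(close_mask.n_true())  # number of pixels inside the polygon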
Example 14
def test_boolean_copy_false_non_boolean():
    mask = np.zeros((10, 10))
    with warnings.catch_warnings(record=True) as w:
        warnings.simplefilter("always")
        BooleanImage(mask, copy=False)
        assert (len(w) == 1)
Example 15
def test_boolean_copy_true():
    mask = np.zeros((10, 10), dtype=np.bool)
    boolean_image = BooleanImage(mask)
    assert (not is_same_array(boolean_image.pixels, mask))
Example 16
def mask_image_3d_test():
    mask_shape = (120, 121, 13)
    mask_region = np.ones(mask_shape)
    return BooleanImage(mask_region)
Example 17
def test_mask_image_3d():
    mask_shape = (13, 120, 121)
    mask_region = np.ones(mask_shape)
    return BooleanImage(mask_region)
Example 18
def pillow_importer(filepath, asset=None, normalize=True, **kwargs):
    r"""
    Imports an image using PIL/pillow.

    Different image modes cause different importing strategies.

    RGB, L, I:
        Imported as either `float` or `uint8` depending on normalisation flag.
    RGBA:
        Imported as :map:`MaskedImage` if normalize is ``True`` else imported
        as a 4 channel `uint8` image.
    1:
        Imported as a :map:`BooleanImage`. Normalisation is ignored.
    F:
        Imported as a floating point image. Normalisation is ignored.

    Parameters
    ----------
    filepath : `Path`
        Absolute filepath of image
    asset : `object`, optional
        An optional asset that may help with loading. This is unused for this
        implementation.
    normalize : `bool`, optional
        If ``True``, normalize between 0.0 and 1.0 and convert to float. If
        ``False``, just pass back whatever PIL imports (according to the
        type rules outlined above).
    \**kwargs : `dict`, optional
        Any other keyword arguments.

    Returns
    -------
    image : :map:`Image` or subclass
        The imported image.
    """
    import PIL.Image as PILImage
    if isinstance(filepath, Path):
        filepath = str(filepath)
    pil_image = PILImage.open(filepath)
    mode = pil_image.mode
    if mode == 'RGBA':
        # If normalize is False, then we return the alpha as an extra
        # channel, which can be useful if the alpha channel has semantic
        # meanings!
        if normalize:
            alpha = np.array(pil_image)[..., 3].astype(np.bool)
            image_pixels = _pil_to_numpy(pil_image, True, convert='RGB')
            image = MaskedImage(image_pixels, mask=alpha, copy=False)
        else:
            # With no normalisation we just return the pixels
            image = Image(_pil_to_numpy(pil_image, False), copy=False)
    elif mode in ['L', 'I', 'RGB']:
        # Greyscale, Integer and RGB images
        image = Image(_pil_to_numpy(pil_image, normalize), copy=False)
    elif mode == '1':
        # Convert to 'L' type (http://stackoverflow.com/a/4114122/1716869).
        # Can't normalize a binary image
        image = BooleanImage(_pil_to_numpy(pil_image, False, convert='L'),
                             copy=True)
    elif mode == 'P':
        # Convert palette images to RGB
        image = Image(_pil_to_numpy(pil_image, normalize, convert='RGB'))
    elif mode == 'F':  # Floating point images
        # Don't normalize as we don't know the scale
        image = Image(_pil_to_numpy(pil_image, False), copy=False)
    else:
        raise ValueError('Unexpected mode for PIL: {}'.format(mode))
    return image
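A hypothetical usage sketch of the importer above ('face.png' below is a placeholder path; any image on disk will do):

from pathlib import Path

img = pillow_importer(Path('face.png'), normalize=True)
print(type(img).__name__, img.shape, img.n_channels)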
Example 19
def test_boolean_copy_false_boolean():
    mask = np.zeros((10, 10), dtype=np.bool)
    boolean_image = BooleanImage(mask, copy=False)
    assert (is_same_array(boolean_image.pixels, mask))