def test_scale_2d_pseudoinverse():
    """pseudoinverse() of a NonUniformScale inverts each axis factor."""
    scale1 = 0.5
    scale2 = 4.0
    # Expected homogeneous matrix of the (0.5, 4.0) scale.
    # NOTE: the original variable name was corrupted to the invalid
    # identifier `h**o`; restored as `homo`.
    homo = np.array([[scale1, 0, 0],
                     [0, scale2, 0],
                     [0, 0, 1]])
    tr = NonUniformScale([1 / scale1, 1 / scale2])
    assert_almost_equal(tr.pseudoinverse().h_matrix, homo)
def lm_centres_correction(centres):
    r"""
    Construct a transform that will correct landmarks for a window
    iterating feature calculation

    Parameters
    ----------
    centres : `ndarray` (H, W, 2)
        The location of the window centres in the features

    Returns
    -------
    :map:`Affine`
        An affine transform that performs the correction.
        Should be applied to the landmarks on the target image.
    """
    # Shift so the top-left window centre sits at the origin.
    shift = Translation(-centres.min(axis=0).min(axis=0), skip_checks=True)
    # Vertical / horizontal stride between adjacent window centres.
    # When only a single row (or column) exists, fall back to the first
    # centre's coordinate as the step.
    if centres.shape[0] > 1:
        step_v = centres[1, 0, 0] - centres[0, 0, 0]
    else:
        step_v = centres[0, 0, 0]
    if centres.shape[1] > 1:
        step_h = centres[0, 1, 1] - centres[0, 0, 1]
    else:
        step_h = centres[0, 0, 1]
    rescale = NonUniformScale((1. / step_v, 1. / step_h), skip_checks=True)
    return shift.compose_before(rescale)
def test_nonuniformscale2d_update_from_vector():
    """_from_vector_inplace must rewrite the scale entries of h_matrix."""
    scale = np.array([3, 4])
    # Expected homogeneous matrix after the in-place update.
    # NOTE: the original variable name was corrupted to the invalid
    # identifier `h**o`; restored as `homo`.
    homo = np.array([[scale[0], 0, 0],
                     [0, scale[1], 0],
                     [0, 0, 1]])
    tr = NonUniformScale(np.array([1, 2]))
    tr._from_vector_inplace(scale)
    assert_equal(tr.h_matrix, homo)
def test_homog_compose_before_nonuniformscale():
    """Composing Homogeneous before a NonUniformScale stays Homogeneous."""
    swap_axes = Homogeneous(np.array([[0, 1, 0],
                                      [1, 0, 0],
                                      [0, 0, 1]]))
    scaling = NonUniformScale([3, 4])
    composed = swap_axes.compose_before(scaling)
    assert type(composed) == Homogeneous
    expected = np.array([[0, 3, 0],
                         [4, 0, 0],
                         [0, 0, 1]])
    assert_allclose(composed.h_matrix, expected)
def model_to_clip_transform(points, xy_scale=0.9, z_scale=0.3):
    r"""
    Produces an Affine Transform which centres and scales 3D points to fit
    into the OpenGL clipping space ([-1, 1], [-1, 1], [1, -1]). This can be
    used to construct an appropriate projection matrix for use in an
    orthographic Rasterizer. Note that the z-axis is flipped as is default
    in OpenGL - as a result this transform converts the right handed
    coordinate input into a left handed one.

    Parameters
    ----------
    points : :map:`PointCloud`
        The points that should be adjusted.
    xy_scale : `float` 0-1, optional
        Amount by which the boundary is relaxed so the points are not
        right against the edge. A value of 1 maps the extremities of the
        point cloud onto [-1, 1] x [-1, 1] exactly (no border); a value of
        0.5 maps the points into [-0.5, 0.5].

        Default: 0.9 (map to [-0.9, 0.9])
    z_scale : `float` 0-1, optional
        Scale factor by which the z-dimension is squeezed. A value of 1
        maps the z-range of the points exactly into [1, -1]; 0.1
        compresses it into [0.1, -0.1].

    Returns
    -------
    :map:`Affine`
        The affine transform that creates this mapping
    """
    # 1. Centre the points on the origin.
    recentre = Translation(points.centre_of_bounds()).pseudoinverse()
    # 2. Scale the points so their extent exactly fills the boundary.
    fit = Scale(points.range() / 2.0)
    # 3. Apply the requested relaxation - note the flip in the z axis!
    # OpenGL by default treats a bigger depth value as further away, so as
    # well as reaching clip space [-1, 1] in all dims we must invert z so
    # depth buffering is applied correctly.
    relax = NonUniformScale([xy_scale, xy_scale, -z_scale])
    return recentre.compose_before(fit.pseudoinverse()).compose_before(relax)
def glyph(self, vectors_block_size=10, use_negative=False, channels=None):
    r"""
    Create glyph of a feature image. If feature_data has negative values,
    the use_negative flag controls whether there will be created a glyph of
    both positive and negative values concatenated the one on top of the
    other.

    Parameters
    ----------
    vectors_block_size : int
        Defines the size of each block with vectors of the glyph image.
    use_negative : bool
        Defines whether to take into account possible negative values of
        feature_data.
    channels : int, slice, 'all' or None, optional
        Channel selection applied to ``self.pixels`` before the glyph is
        built. ``None`` keeps the first four channels, ``'all'`` keeps
        everything, otherwise used directly as a channel index.
    """
    # Select the channels that the glyph is built from.
    if channels is None:
        selected = self.pixels[..., :4]
    elif channels != 'all':
        selected = self.pixels[..., channels]
    else:
        selected = self.pixels
    # Build the glyph; normalise by the largest magnitude (positive or
    # negative) so both halves share the same intensity scale.
    negated = -selected
    norm = np.maximum(selected.max(), negated.max())
    positive = _create_feature_glyph(selected, vectors_block_size)
    positive = positive * 255 / norm
    glyph_image = positive
    if use_negative and selected.min() < 0:
        negative = _create_feature_glyph(negated, vectors_block_size)
        negative = negative * 255 / norm
        glyph_image = np.concatenate((positive, negative))
    glyph = Image(glyph_image)
    # Correct the landmarks: rescale them from this image's frame into the
    # glyph's (larger) frame.
    from menpo.transform import NonUniformScale
    source_shape = np.array(self.shape, dtype=np.double)
    target_shape = np.array(glyph.shape, dtype=np.double)
    correction = NonUniformScale(target_shape / source_shape)
    glyph.landmarks = self.landmarks
    correction.apply_inplace(glyph.landmarks)
    return glyph
def rebuild_feature_image(image, f_pixels):
    """Wrap freshly-computed feature pixels back up as an image.

    Preserves the input image's mask (resized if the feature changed the
    spatial shape) and landmarks (rescaled if the shape changed).

    Parameters
    ----------
    image : :map:`Image` or :map:`MaskedImage`
        The image the features were computed from.
    f_pixels : `ndarray`
        The feature pixels; spatial shape is ``f_pixels.shape[1:]``.

    Returns
    -------
    :map:`Image` or :map:`MaskedImage`
        A new image wrapping ``f_pixels`` with mask/landmarks carried over.
    """
    shape_changed = f_pixels.shape[1:] != image.shape
    if hasattr(image, 'mask'):
        # original image had a mask. Did the feature generate an image of
        # the same size?
        if shape_changed:
            # feature is of a different size - best we can do is rescale
            # the mask
            mask = image.mask.resize(f_pixels.shape[1:])
        else:
            # feature is same size as input
            # BUG FIX: a stray ')' after copy() was a syntax error.
            mask = image.mask.copy()
        new_image = MaskedImage(f_pixels, mask=mask, copy=False)
    else:
        new_image = Image(f_pixels, copy=False)
    if image.has_landmarks:
        if shape_changed:
            # need to adjust the landmarks. Use numpy (consistent with the
            # rest of the file) rather than torch for the shape ratio.
            sf = np.array(f_pixels.shape[1:]) / np.array(image.shape)
            new_image.landmarks = NonUniformScale(sf).apply(image.landmarks)
        else:
            new_image.landmarks = image.landmarks
    return new_image
def test_nonuniformscale_from_list():
    """Construction from a list and an ndarray must be equivalent."""
    from_list = NonUniformScale([3, 2, 3])
    from_array = NonUniformScale(np.array([3, 2, 3]))
    assert np.all(from_list.h_matrix == from_array.h_matrix)
def test_nonuniformscale_2d_n_parameters():
    """A 2D NonUniformScale exposes one parameter per axis."""
    transform = NonUniformScale(np.array([1, 2]))
    assert transform.n_parameters == 2
def test_nonuniformscale2d_as_vector():
    """as_vector() round-trips the per-axis scale factors."""
    factors = np.array([1, 2])
    assert_allclose(NonUniformScale(factors).as_vector(), factors)
def test_nonuniformscale_set_h_matrix_raises_notimplementederror():
    """set_h_matrix is unsupported on NonUniformScale and must raise.

    The original test called set_h_matrix without asserting the exception
    (the expected-exception decorator was missing), so a raise would fail
    the test and a silent success would wrongly pass it. Assert explicitly.
    """
    s = NonUniformScale([2, 3, 4])
    try:
        s.set_h_matrix(s.h_matrix)
    except NotImplementedError:
        pass
    else:
        raise AssertionError(
            'set_h_matrix should raise NotImplementedError')
def test_affine_pseudoinverse():
    """Affine.pseudoinverse() of a scale matrix inverts each axis factor."""
    forward = NonUniformScale([4, 3])
    expected = NonUniformScale([1. / 4, 1. / 3])
    as_affine = Affine(forward.h_matrix)
    inverted = as_affine.pseudoinverse()
    assert_allclose(inverted.h_matrix, expected.h_matrix)
def rescale(self, scale, interpolator='scipy', round='ceil', **kwargs):
    r"""
    Return a copy of this image, rescaled by a given factor. All image
    information (landmarks) are rescaled appropriately.

    Parameters
    ----------
    scale : float or tuple
        The scale factor. If a tuple, the scale to apply to each dimension.
        If a single float, the scale will be applied uniformly across
        each dimension.
    interpolator : 'scipy' or 'c', optional
        The interpolator that should be used to perform the warp.

        Default: 'scipy'
    round: {'ceil', 'floor', 'round'}
        Rounding function to be applied to floating point shapes.

        Default: 'ceil'
    kwargs : dict
        Passed through to the interpolator. See `menpo.interpolation`
        for details.

    Returns
    -------
    rescaled_image : type(self)
        A copy of this image, rescaled.

    Raises
    ------
    ValueError:
        If less scales than dimensions are provided.
        If any scale is less than or equal to 0.
    """
    # Pythonic way of converting to list if we are passed a single float
    try:
        if len(scale) < self.n_dims:
            raise ValueError(
                'Must provide a scale per dimension.'
                '{} scales were provided, {} were expected.'.format(
                    len(scale), self.n_dims))
    except TypeError:  # Thrown when len() is called on a float
        scale = [scale] * self.n_dims
    # Make sure we have a numpy array
    scale = np.asarray(scale)
    for s in scale:
        if s <= 0:
            raise ValueError('Scales must be positive floats.')
    transform = NonUniformScale(scale)
    from menpo.image.boolean import BooleanImage
    # use the scale factor to make the template mask bigger
    template_mask = BooleanImage.blank(transform.apply(self.shape),
                                       round=round)
    # due to image indexing, we can't just apply the pseudoinverse
    # transform to achieve the scaling we want though!
    # Consider a 3x rescale on a 2x4 image. Looking at each dimension:
    #    H 2 -> 6 so [0-1] -> [0-5] = 5/1 = 5x
    #    W 4 -> 12 [0-3] -> [0-11] = 11/3 = 3.67x
    # => need to make the correct scale per dimension!
    # (np.float was removed from NumPy; the builtin float is equivalent)
    shape = np.array(self.shape, dtype=float)
    # scale factors = max_index_after / current_max_index
    # (note that max_index = length - 1, as 0 based)
    scale_factors = (scale * shape - 1) / (shape - 1)
    # BUG FIX: pseudoinverse is a method - the original passed the bound
    # method itself (missing call parentheses) to warp_to.
    inverse_transform = NonUniformScale(scale_factors).pseudoinverse()
    # Note here we pass warp_mask to warp_to. In the case of
    # Images that aren't MaskedImages this kwarg will
    # harmlessly fall through so we are fine.
    return self.warp_to(template_mask, inverse_transform,
                        warp_landmarks=True,
                        interpolator=interpolator, **kwargs)
def normalize(gt):
    """Build a transform that centres ``gt`` at the origin and rescales
    each axis by the inverse of its range."""
    from menpo.transform import Translation, NonUniformScale
    recentre = Translation(gt.centre()).pseudoinverse()
    unit_range = NonUniformScale(gt.range()).pseudoinverse()
    return recentre.compose_before(unit_range)