def __init__(self, model, transform_cls, global_transform, source=None):
    # Couple the statistical shape model to the global similarity
    # transform via a GlobalPDM; this drives the transform's parameters.
    self.pdm = GlobalPDM(model, global_transform)
    # Cache slot for previously-computed points; populated lazily
    # elsewhere (not in this view) — starts empty.
    self._cached_points = None
    # NOTE(review): self.target is presumably a property derived from
    # self.pdm (set above), so the ordering of these statements matters —
    # confirm against the base class. If `source` is None, the transform
    # class is expected to substitute a default (the docs elsewhere say
    # the model mean) — TODO confirm transform_cls handles None.
    self.transform = transform_cls(source, self.target)
class GlobalMDTransform(ModelDrivenTransform):
    r"""
    A transform that couples an alignment transform to a statistical model
    together with a global similarity transform, such that the weights of
    the transform are fully specified by both the weights of the
    statistical model and the weights of the similarity transform. The
    model is assumed to generate an instance which is then transformed by
    the similarity transform; the result defines the target landmarks of
    the transform. If no source is provided, the mean of the model is
    defined as the source landmarks of the transform.

    .. note::
        NOTE(review): an identical class definition with the same name
        appears again later in this file and will shadow this one at
        import time — confirm which copy is intended and remove the other.

    Parameters
    ----------
    model : :class:`menpo.model.base.StatisticalModel`
        A linear statistical shape model.
    transform_cls : :class:`menpo.transform.AlignableTransform`
        A class of :class:`menpo.transform.base.AlignableTransform`.
        Called with the source and target landmarks. The target is set to
        the points generated from the model using the provided weights -
        the source is either given or set to the model's mean.
    global_transform : :class:`menpo.transform.AlignableTransform`
        The global transform that should be applied to the model output.
        Doesn't have to have been constructed from the .align() constructor.
        Note that the GlobalMDTransform isn't guaranteed to hold on to the
        exact object passed in here - so don't expect external changes to
        the global_transform to be reflected in the behavior of this
        object.
    source : :class:`menpo.shape.base.PointCloud`, optional
        The source landmarks of the transform. If no `source` is provided
        the mean of the model is used.

    .. note::
        NOTE(review): the original docstring also documented ``weights``
        and ``composition`` parameters, but ``__init__`` does not accept
        them — confirm against the intended API before relying on them.
    """
    def __init__(self, model, transform_cls, global_transform, source=None):
        # Couple the shape model and the global similarity transform.
        self.pdm = GlobalPDM(model, global_transform)
        # Lazily-populated cache; starts empty.
        self._cached_points = None
        # self.target is presumably derived from self.pdm (set above) —
        # TODO confirm the attribute comes from the base class.
        self.transform = transform_cls(source, self.target)

    def compose_after_from_vector_inplace(self, delta):
        r"""
        Composes two ModelDrivenTransforms together based on the first
        order approximation proposed by Papandreou and Maragos in [1].

        Updates this transform in place; nothing is returned (the original
        docstring claimed ``self`` was returned, but there is no return
        statement).

        Parameters
        ----------
        delta : (N,) ndarray
            Vectorized :class:`ModelDrivenTransform` to be applied
            **before** self.

        References
        ----------
        .. [1] G. Papandreou and P. Maragos, "Adaptive and Constrained
           Algorithms for Inverse Compositional Active Appearance Model
           Fitting", CVPR08
        """
        # The incremental warp is always evaluated at p=0, i.e. the mean
        # shape of the model.
        points = self.pdm.model.mean.points

        # We need three Jacobians:
        #   - dW/dp at p=0
        #   - dW/dp at p!=0
        #   - dW/dx at p!=0, evaluated at the source landmarks

        # dW/dq at p=0 and at p!=0 coincide: the Jacobian of the global
        # transform evaluated at the model mean.
        # (n_points, n_global_params, n_dims)
        dW_dq = self.pdm._global_transform_d_dp(points)

        # dW/db at p=0 is the Jacobian of the model. PDM.d_dp is called
        # unbound on self.pdm to get the *model-only* Jacobian, bypassing
        # GlobalPDM's override.
        # (n_points, n_weights, n_dims)
        dW_db_0 = PDM.d_dp(self.pdm, points)

        # dW/dp at p=0 is the concatenation of the two blocks above.
        # (n_points, n_params, n_dims)
        dW_dp_0 = np.hstack((dW_dq, dW_db_0))

        # By the chain rule, dW/db at p!=0 is the Jacobian of the global
        # transform w.r.t. the points times the Jacobian of the model:
        # dX(S)/db = dX/dS * dS/db.
        # (n_points, n_dims, n_dims)
        dW_dS = self.pdm.global_transform.d_dx(points)
        # (n_points, n_weights, n_dims)
        # NOTE(review): these subscripts sum over the axis-1 index of
        # dW_dS while broadcasting (not contracting) the shared last
        # axis — that is unusual for a chain-rule product; confirm the
        # intended contraction before changing anything.
        dW_db = np.einsum('ilj, idj -> idj', dW_dS, dW_db_0)

        # dW/dp at p!=0: global block concatenated with the warped model
        # block. (n_points, n_params, n_dims)
        dW_dp = np.hstack((dW_dq, dW_db))

        # dW/dx: Jacobian of the transform w.r.t. the points, evaluated at
        # the source landmarks. (n_points, n_dims, n_dims)
        dW_dx = self.transform.d_dx(points)

        # (n_points, n_params, n_dims)
        # NOTE(review): same caveat as above — 'k' appears in the output,
        # so there is no contraction over the spatial axis; verify.
        dW_dx_dW_dp_0 = np.einsum('ijk, ilk -> ilk', dW_dx, dW_dp_0)

        #TODO: Can we do this without splitting across the two dimensions?
        # dW_dx_x = dW_dx[:, 0, :].flatten()[..., None]
        # dW_dx_y = dW_dx[:, 1, :].flatten()[..., None]
        # dW_dp_0_mat = np.reshape(dW_dp_0, (n_points * self.n_dims,
        #                                    self.n_parameters))
        # dW_dx_dW_dp_0 = dW_dp_0_mat * dW_dx_x + dW_dp_0_mat * dW_dx_y
        # # (n_points, n_params, n_dims)
        # dW_dx_dW_dp_0 = np.reshape(dW_dx_dW_dp_0,
        #                            (n_points, self.n_parameters,
        #                             self.n_dims))

        # Project the chained Jacobian onto the current parametrisation.
        # (n_params, n_params)
        J = np.einsum('ijk, ilk -> jl', dW_dp, dW_dx_dW_dp_0)
        # Gram matrix of the current Jacobian. (n_params, n_params)
        H = np.einsum('ijk, ilk -> jl', dW_dp, dW_dp)
        # First-order parameter-update map: H^{-1} J. (n_params, n_params)
        Jp = np.linalg.solve(H, J)

        # Apply the mapped increment to this transform's parameters.
        self.from_vector_inplace(self.as_vector() + np.dot(Jp, delta))
class GlobalMDTransform(ModelDrivenTransform):
    r"""
    Model-driven transform whose parameters are the concatenation of a
    global similarity transform's parameters and the weights of a linear
    statistical shape model. The model generates an instance, the global
    similarity transform maps it, and the result defines the target
    landmarks. When no source is supplied, the model mean acts as the
    source landmarks.

    Parameters
    ----------
    model : :class:`menpo.model.base.StatisticalModel`
        A linear statistical shape model.
    transform_cls : :class:`menpo.transform.AlignableTransform`
        Transform class invoked with the source and target landmarks to
        build the underlying alignment transform.
    global_transform : :class:`menpo.transform.AlignableTransform`
        Global transform applied to the model output. This object is not
        guaranteed to be held on to directly, so external mutation of it
        may not be reflected here.
    source : :class:`menpo.shape.base.PointCloud`, optional
        Source landmarks; defaults to the model mean when omitted.
    """

    def __init__(self, model, transform_cls, global_transform, source=None):
        # A GlobalPDM fuses the shape model with the similarity transform
        # and drives this transform's parametrisation.
        self.pdm = GlobalPDM(model, global_transform)
        # Lazy cache, filled elsewhere.
        self._cached_points = None
        # self.target derives from self.pdm, so it must be set first.
        self.transform = transform_cls(source, self.target)

    def compose_after_from_vector_inplace(self, delta):
        r"""
        Compose another vectorized ModelDrivenTransform before this one,
        using the first-order approximation of Papandreou and Maragos [1].
        The update is applied in place to this transform's parameters.

        Parameters
        ----------
        delta : (N,) ndarray
            Vectorized :class:`ModelDrivenTransform` applied **before**
            self.

        References
        ----------
        .. [1] G. Papandreou and P. Maragos, "Adaptive and Constrained
           Algorithms for Inverse Compositional Active Appearance Model
           Fitting", CVPR08
        """
        # Every Jacobian below is evaluated at p=0, i.e. the mean shape.
        mean_points = self.pdm.model.mean.points

        # Jacobian of the global similarity block — identical at p=0 and
        # p!=0. Shape: (n_points, n_global_params, n_dims).
        global_jac = self.pdm._global_transform_d_dp(mean_points)

        # Model-only Jacobian at p=0, obtained by calling PDM.d_dp
        # unbound so GlobalPDM's override is bypassed.
        # Shape: (n_points, n_weights, n_dims).
        model_jac = PDM.d_dp(self.pdm, mean_points)

        # Full parameter Jacobian at p=0: global block then model block.
        # Shape: (n_points, n_params, n_dims).
        full_jac_at_0 = np.hstack((global_jac, model_jac))

        # Chain rule for the model block at p!=0:
        # dX(S)/db = dX/dS * dS/db, where dX/dS is the Jacobian of the
        # global transform w.r.t. the points.
        # Shape: (n_points, n_dims, n_dims).
        warp_jac = self.pdm.global_transform.d_dx(mean_points)
        # Shape: (n_points, n_weights, n_dims).
        warped_model_jac = np.einsum("ilj, idj -> idj", warp_jac, model_jac)

        # Full parameter Jacobian at p!=0.
        # Shape: (n_points, n_params, n_dims).
        full_jac = np.hstack((global_jac, warped_model_jac))

        # Jacobian of the transform w.r.t. the points, evaluated at the
        # source landmarks. Shape: (n_points, n_dims, n_dims).
        spatial_jac = self.transform.d_dx(mean_points)
        # Shape: (n_points, n_params, n_dims).
        chained_jac = np.einsum("ijk, ilk -> ilk", spatial_jac,
                                full_jac_at_0)

        # Project onto the current parametrisation and solve for the
        # first-order parameter-update map. Both matrices are
        # (n_params, n_params).
        cross = np.einsum("ijk, ilk -> jl", full_jac, chained_jac)
        gram = np.einsum("ijk, ilk -> jl", full_jac, full_jac)
        update_map = np.linalg.solve(gram, cross)

        # Apply the mapped increment to this transform's parameters.
        self.from_vector_inplace(self.as_vector()
                                 + np.dot(update_map, delta))