Example #1
0
 def _increment_shape_model(self, shapes, scale_index, forgetting_factor=1.0):
     """Incrementally update the dense shape model at the given scale."""
     # Procrustes-align the new shapes, then densify them into the
     # reference frame before feeding them to the per-scale model.
     densified = densify_shapes(align_shapes(shapes),
                                self.reference_frame, self.transform)
     model = self.shape_models[scale_index]
     model.increment(densified,
                     forgetting_factor=forgetting_factor,
                     max_n_components=self.max_shape_components[scale_index])
Example #2
0
 def _increment_shape_model(self, shapes, shape_model,
                            forgetting_factor=1.0):
     """Incrementally update ``shape_model`` with newly aligned shapes."""
     # Procrustes-align the incoming shapes and update the model in place.
     shape_model.increment(align_shapes(shapes),
                           forgetting_factor=forgetting_factor)
Example #3
0
 def _increment_shape_model(self, shapes, shape_model,
                            forgetting_factor=1.0):
     """Incrementally update a dense shape model with new shapes."""
     # Align the shapes, then densify them into the reference frame.
     dense = densify_shapes(align_shapes(shapes), self.reference_frame,
                            self.transform)
     shape_model.increment(dense, forgetting_factor=forgetting_factor)
Example #4
0
 def _increment_shape_model(self, shapes, scale_index,
                            forgetting_factor=1.0):
     """Incrementally update the dense shape model at ``scale_index``."""
     # Align then densify the new shapes into the reference frame.
     densified = densify_shapes(align_shapes(shapes), self.reference_frame,
                                self.transform)
     # Update the per-scale model, trimming to the configured maximum.
     max_sc = self.max_shape_components[scale_index]
     self.shape_models[scale_index].increment(
         densified,
         forgetting_factor=forgetting_factor,
         max_n_components=max_sc)
Example #5
0
 def _build_shape_model(self, shapes, scale_index):
     """Build a dense shape model from the given shapes.

     Also caches the landmark count and the reference frame on the
     instance for later use.
     """
     mean_shape = mean_pointcloud(align_shapes(shapes))
     self.n_landmarks = mean_shape.n_points
     self.reference_frame = build_reference_frame(mean_shape)
     # Densify into the reference frame and build the dense model.
     return build_shape_model(
         densify_shapes(shapes, self.reference_frame, self.transform))
Example #6
0
    def __init__(self, data, **kwargs):
        """Initialise from shapes; the default target is their mean."""
        from menpofit.transform import DifferentiableAlignmentSimilarity

        self.mean = mean_pointcloud(align_shapes(data))
        # The target defaults to the mean shape.
        self._target = self.mean
        # Identity-like alignment of the target onto itself.
        self.transform = DifferentiableAlignmentSimilarity(self.target,
                                                           self.target)
Example #7
0
    def _build_shape_model(self, shapes, scale_index):
        """Build the dense shape model for ``scale_index``.

        Caches the landmark count and reference frame on the instance.
        """
        mean_shape = mean_pointcloud(align_shapes(shapes))
        self.n_landmarks = mean_shape.n_points
        self.reference_frame = build_reference_frame(mean_shape)
        dense = densify_shapes(shapes, self.reference_frame, self.transform)

        # Instantiate the per-scale shape model class, trimming components.
        cls = self._shape_model_cls[scale_index]
        return cls(dense,
                   max_n_components=self.max_shape_components[scale_index])
Example #8
0
    def __init__(self, data, **kwargs):
        """Build the mean shape and a similarity transform targeting it."""
        from menpofit.transform import DifferentiableAlignmentSimilarity

        aligned = align_shapes(data)
        self.mean = mean_pointcloud(aligned)
        # By default, target the mean shape.
        self._target = self.mean
        self.transform = DifferentiableAlignmentSimilarity(
            self.target, self.target)
Example #9
0
 def increment(self, shapes, n_shapes=None, forgetting_factor=1.0,
               max_n_components=None, verbose=False):
     """Incrementally update the underlying PCA model with new shapes."""
     # Remember the current target so it can be re-set on the new model.
     previous_target = self.target
     self.model.increment(align_shapes(shapes), n_samples=n_shapes,
                          forgetting_factor=forgetting_factor,
                          verbose=verbose)
     if max_n_components is not None:
         self.model.trim_components(max_n_components)
     # Re-apply the old target against the freshly updated model.
     self.set_target(previous_target)
Example #10
0
 def _build_shape_model(self, shapes, scale_index):
     """Build a dense, patch-based shape model for ``scale_index``."""
     mean_shape = mean_pointcloud(align_shapes(shapes))
     self.n_landmarks = mean_shape.n_points
     # The reference frame is built from patches at this scale.
     self.reference_frame = build_patch_reference_frame(
         mean_shape, patch_shape=self.patch_shape[scale_index])
     dense = densify_shapes(shapes, self.reference_frame, self.transform)
     # Instantiate the per-scale model class, trimming components.
     cls = self._shape_model_cls[scale_index]
     return cls(dense,
                max_n_components=self.max_shape_components[scale_index])
Example #11
0
    def __init__(self, data, max_n_components=None):
        """Wrap an existing PCAModel, or build one from a list of shapes."""
        if isinstance(data, PCAModel):
            model = data
        else:
            # Procrustes-align the raw shapes before modelling them.
            model = PCAModel(align_shapes(data))

        if max_n_components is not None:
            model.trim_components(max_n_components)
        super(PDM, self).__init__(model)
        # The target defaults to the model mean.
        self._target = self.model.mean()
Example #12
0
    def __init__(self, data, max_n_components=None):
        """Construct from either a ready-made PCAModel or raw shape data."""
        if isinstance(data, PCAModel):
            pca = data
        else:
            pca = PCAModel(align_shapes(data))

        # Optionally restrict the number of retained components.
        if max_n_components is not None:
            pca.trim_components(max_n_components)
        super(PDM, self).__init__(pca)
        # Default target is the mean of the model.
        self._target = self.model.mean()
Example #13
0
    def increment(self, shapes, n_shapes=None, forgetting_factor=1.0,
                  max_n_components=None, verbose=False):
        r"""
        Incrementally update this model's eigenvectors, eigenvalues and
        mean vector from the given shapes.

        Parameters
        ----------
        shapes : `list` of `menpo.shape.PointCloud`
            New shapes with which to update the model.
        n_shapes : `int` or ``None``, optional
            If `int`, `shapes` must be an iterator yielding `n_shapes`
            samples. If ``None``, `shapes` must be a list so the size of
            the data matrix is known in advance.
        forgetting_factor : ``[0.0, 1.0]`` `float`, optional
            Relative weighting of new samples against old ones. At 1.0
            all samples are weighted equally, which is equivalent to
            batch PCA on the concatenated old and new samples; below 1.0
            the new samples are emphasised. See [1] for details.
        max_n_components : `int` or ``None``, optional
            The maximum number of components to keep; ``None`` keeps all.
        verbose : `bool`, optional
            If ``True``, print progress information.

        References
        ----------
        .. [1] D. Ross, J. Lim, R.S. Lin, M.H. Yang. "Incremental Learning for
            Robust Visual Tracking". International Journal on Computer Vision,
            2007.
        """
        # Remember the current target so it survives the model update.
        previous_target = self.target
        self.model.increment(align_shapes(shapes), n_samples=n_shapes,
                             forgetting_factor=forgetting_factor,
                             verbose=verbose)
        if max_n_components is not None:
            self.model.trim_components(max_n_components)
        # Re-orthonormalize the similarity model after the update.
        self._construct_similarity_model()
        # Re-set the previous target against the updated model.
        self.set_target(previous_target)
Example #14
0
 def increment(self, shapes, n_shapes=None, forgetting_factor=1.0,
               max_n_components=None, verbose=False):
     """Update the underlying PCA model with new, Procrustes-aligned shapes."""
     # Keep the current target so it can be re-applied afterwards.
     target_before = self.target
     self.model.increment(align_shapes(shapes), n_samples=n_shapes,
                          forgetting_factor=forgetting_factor,
                          verbose=verbose)
     if max_n_components is not None:
         self.model.trim_components(max_n_components)
     # Re-set the old target against the freshly updated model.
     self.set_target(target_before)
Example #15
0
    def increment(self, shapes, n_shapes=None, forgetting_factor=1.0,
                  max_n_components=None, verbose=False):
        r"""
        Perform incremental PCA with the given shapes, updating this
        model's mean vector, eigenvectors and eigenvalues.

        Parameters
        ----------
        shapes : `list` of `menpo.shape.PointCloud`
            The new shapes used to update the model.
        n_shapes : `int` or ``None``, optional
            If `int`, `shapes` is expected to be an iterator yielding
            `n_shapes` items; if ``None``, `shapes` must be a list so the
            data matrix can be sized up front.
        forgetting_factor : ``[0.0, 1.0]`` `float`, optional
            Weights the contribution of new samples relative to old
            ones. A value of 1.0 weights all samples equally, matching
            batch PCA over the concatenated old and new samples; values
            below 1.0 emphasise the new samples. See [1] for details.
        max_n_components : `int` or ``None``, optional
            The maximum number of components kept after the update;
            ``None`` keeps them all.
        verbose : `bool`, optional
            If ``True``, progress information is printed.

        References
        ----------
        .. [1] D. Ross, J. Lim, R.S. Lin, M.H. Yang. "Incremental Learning for
            Robust Visual Tracking". International Journal on Computer Vision,
            2007.
        """
        target_before = self.target
        aligned = align_shapes(shapes)
        self.model.increment(aligned, n_samples=n_shapes,
                             forgetting_factor=forgetting_factor,
                             verbose=verbose)
        if max_n_components is not None:
            self.model.trim_components(max_n_components)
        # Re-orthonormalize the similarity model.
        self._construct_similarity_model()
        # Restore the previous target on the updated model.
        self.set_target(target_before)
Example #16
0
    def _train_batch(self,
                     image_batch,
                     increment=False,
                     group=None,
                     verbose=False):
        r"""
        Train (or incrementally update) the per-scale shape, deformation
        and appearance models from a batch of landmarked images.

        Parameters
        ----------
        image_batch : `list`
            Batch of images carrying landmarks under ``group``.
        increment : `bool`, optional
            If ``True``, update the existing per-scale models in place;
            otherwise append freshly built models for each scale.
        group : `str` or ``None``, optional
            The landmark group used to extract shapes from the images.
        verbose : `bool`, optional
            If ``True``, print progress information.
        """
        # Rescale to existing reference shape
        image_batch = rescale_images_to_reference_shape(image_batch,
                                                        group,
                                                        self.reference_shape,
                                                        verbose=verbose)

        # If the deformation graph was not provided (None given), then compute
        # the MST
        if None in self.deformation_graph:
            graph_shapes = [i.landmarks[group].lms for i in image_batch]
            deformation_mst = _compute_minimum_spanning_tree(graph_shapes,
                                                             root_vertex=0,
                                                             prefix='- ',
                                                             verbose=verbose)
            # Fill only the scales that were left unspecified.
            self.deformation_graph = [
                deformation_mst if g is None else g
                for g in self.deformation_graph
            ]

        # Build models at each scale
        if verbose:
            print_dynamic('- Building models\n')

        feature_images = []
        # for each scale (low --> high)
        for j in range(self.n_scales):
            if verbose:
                if len(self.scales) > 1:
                    scale_prefix = '  - Scale {}: '.format(j)
                else:
                    scale_prefix = '  - '
            else:
                scale_prefix = None

            # Handle holistic features
            if j == 0 and self.holistic_features[j] == no_op:
                # Saves a lot of memory
                feature_images = image_batch
            elif (j == 0 or self.holistic_features[j]
                  is not self.holistic_features[j - 1]):
                # Compute features only if this is the first pass through
                # the loop or the features at this scale are different from
                # the features at the previous scale
                feature_images = compute_features(image_batch,
                                                  self.holistic_features[j],
                                                  prefix=scale_prefix,
                                                  verbose=verbose)
            # handle scales
            if self.scales[j] != 1:
                # Scale feature images only if scale is different than 1
                scaled_images = scale_images(feature_images,
                                             self.scales[j],
                                             prefix=scale_prefix,
                                             verbose=verbose)
            else:
                scaled_images = feature_images

            # Extract potentially rescaled shapes
            scale_shapes = [i.landmarks[group].lms for i in scaled_images]

            # Apply procrustes to align the shapes
            aligned_shapes = align_shapes(scale_shapes)

            # Build the shape model using the aligned shapes
            if verbose:
                print_dynamic('{}Building shape model'.format(scale_prefix))

            if not increment:
                self.shape_models.append(
                    self._build_shape_model(aligned_shapes,
                                            self.shape_graph[j],
                                            self.max_shape_components[j],
                                            verbose=verbose))
            else:
                self.shape_models[j].increment(aligned_shapes, verbose=verbose)

            # Build the deformation model
            if verbose:
                print_dynamic(
                    '{}Building deformation model'.format(scale_prefix))

            # NOTE(review): the deformation model uses the aligned shapes
            # only when Procrustes is enabled; otherwise the raw shapes.
            if self.use_procrustes:
                deformation_shapes = aligned_shapes
            else:
                deformation_shapes = scale_shapes

            if not increment:
                self.deformation_models.append(
                    self._build_deformation_model(deformation_shapes,
                                                  self.deformation_graph[j],
                                                  verbose=verbose))
            else:
                self.deformation_models[j].increment(deformation_shapes,
                                                     verbose=verbose)

            # Obtain warped images
            warped_images = self._warp_images(scaled_images, scale_shapes, j,
                                              scale_prefix, verbose)

            # Build the appearance model
            if verbose:
                print_dynamic(
                    '{}Building appearance model'.format(scale_prefix))

            if not increment:
                self.appearance_models.append(
                    self._build_appearance_model(
                        warped_images,
                        self.appearance_graph[j],
                        self.n_appearance_components[j],
                        verbose=verbose))
            else:
                self._increment_appearance_model(warped_images,
                                                 self.appearance_graph[j],
                                                 self.appearance_models[j],
                                                 verbose=verbose)

            if verbose:
                print_dynamic('{}Done\n'.format(scale_prefix))
Example #17
0
    def _train_batch(self, image_batch, increment=False, group=None,
                     verbose=False):
        r"""
        Train (or incrementally update) the per-scale shape, deformation
        and appearance models from a batch of landmarked images.

        Parameters
        ----------
        image_batch : `list`
            Batch of images carrying landmarks under ``group``.
        increment : `bool`, optional
            If ``True``, update the existing per-scale models in place;
            otherwise append freshly built models for each scale.
        group : `str` or ``None``, optional
            The landmark group used to extract shapes from the images.
        verbose : `bool`, optional
            If ``True``, print progress information.
        """
        # Rescale to existing reference shape
        image_batch = rescale_images_to_reference_shape(
            image_batch, group, self.reference_shape, verbose=verbose)

        # If the deformation graph was not provided (None given), then compute
        # the MST
        if None in self.deformation_graph:
            graph_shapes = [i.landmarks[group] for i in image_batch]
            deformation_mst = _compute_minimum_spanning_tree(
                graph_shapes, root_vertex=0, prefix='- ', verbose=verbose)
            # Fill only the scales that were left unspecified.
            self.deformation_graph = [deformation_mst if g is None else g
                                      for g in self.deformation_graph]

        # Build models at each scale
        if verbose:
            print_dynamic('- Building models\n')

        feature_images = []
        # for each scale (low --> high)
        for j in range(self.n_scales):
            if verbose:
                if len(self.scales) > 1:
                    scale_prefix = '  - Scale {}: '.format(j)
                else:
                    scale_prefix = '  - '
            else:
                scale_prefix = None

            # Handle holistic features
            if j == 0 and self.holistic_features[j] == no_op:
                # Saves a lot of memory
                feature_images = image_batch
            elif (j == 0 or self.holistic_features[j] is not
                  self.holistic_features[j - 1]):
                # Compute features only if this is the first pass through
                # the loop or the features at this scale are different from
                # the features at the previous scale
                feature_images = compute_features(image_batch,
                                                  self.holistic_features[j],
                                                  prefix=scale_prefix,
                                                  verbose=verbose)
            # handle scales
            if self.scales[j] != 1:
                # Scale feature images only if scale is different than 1
                scaled_images = scale_images(feature_images, self.scales[j],
                                             prefix=scale_prefix,
                                             verbose=verbose)
            else:
                scaled_images = feature_images

            # Extract potentially rescaled shapes
            scale_shapes = [i.landmarks[group] for i in scaled_images]

            # Apply procrustes to align the shapes
            aligned_shapes = align_shapes(scale_shapes)

            # Build the shape model using the aligned shapes
            if verbose:
                print_dynamic('{}Building shape model'.format(scale_prefix))

            if not increment:
                self.shape_models.append(self._build_shape_model(
                    aligned_shapes, self.shape_graph[j],
                    self.max_shape_components[j], verbose=verbose))
            else:
                self.shape_models[j].increment(aligned_shapes, verbose=verbose)

            # Build the deformation model
            if verbose:
                print_dynamic('{}Building deformation model'.format(
                    scale_prefix))

            # NOTE(review): the deformation model uses the aligned shapes
            # only when Procrustes is enabled; otherwise the raw shapes.
            if self.use_procrustes:
                deformation_shapes = aligned_shapes
            else:
                deformation_shapes = scale_shapes

            if not increment:
                self.deformation_models.append(self._build_deformation_model(
                    deformation_shapes, self.deformation_graph[j],
                    verbose=verbose))
            else:
                self.deformation_models[j].increment(deformation_shapes,
                                                     verbose=verbose)

            # Obtain warped images
            warped_images = self._warp_images(scaled_images, scale_shapes,
                                              j, scale_prefix, verbose)

            # Build the appearance model
            if verbose:
                print_dynamic('{}Building appearance model'.format(
                    scale_prefix))

            if not increment:
                self.appearance_models.append(self._build_appearance_model(
                    warped_images, self.appearance_graph[j],
                    self.n_appearance_components[j], verbose=verbose))
            else:
                self._increment_appearance_model(
                    warped_images, self.appearance_graph[j],
                    self.appearance_models[j], verbose=verbose)

            if verbose:
                print_dynamic('{}Done\n'.format(scale_prefix))