Example #1
    def _prepare_image(self, image, initial_shape, gt_shape=None):
        r"""
        The image is first rescaled with respect to the reference landmarks,
        then a smoothing or Gaussian pyramid is computed and, finally,
        features are extracted from each pyramid level.
        """
        image.landmarks['initial_shape'] = initial_shape
        image = image.rescale_to_reference_shape(
            self.reference_shape, group='initial_shape',
            interpolator=self.interpolator)

        if gt_shape:
            image.landmarks['gt_shape'] = gt_shape

        if self.n_levels > 1:
            if self.scaled_levels:
                pyramid = image.gaussian_pyramid(
                    n_levels=self.n_levels, downscale=self.downscale)
            else:
                pyramid = image.smoothing_pyramid(
                    n_levels=self.n_levels, downscale=self.downscale)
            images = [compute_features(i, self.feature_type)
                      for i in pyramid]
            images.reverse()
        else:
            images = [compute_features(image, self.feature_type)]

        return images
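
A minimal, self-contained sketch of the pattern the method above implements: build one image per pyramid level, extract features from each, and reverse the list so that index 0 holds the lowest resolution. The compute_features and gaussian_pyramid helpers below are simplified NumPy stand-ins, not the menpo API:

import numpy as np

def compute_features(pixels, feature_type=None):
    # stand-in feature extractor: identity "features"
    return pixels

def gaussian_pyramid(pixels, n_levels=3, downscale=2):
    # crude pyramid stand-in: halve the resolution at every level
    level = pixels
    for _ in range(n_levels):
        yield level
        level = level[::downscale, ::downscale]

image = np.random.rand(64, 64)
images = [compute_features(level) for level in gaussian_pyramid(image)]
images.reverse()  # index 0 is now the coarsest (lowest resolution) level
print([im.shape for im in images])  # [(16, 16), (32, 32), (64, 64)]
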
Example #2
    def _prepare_image(self, image, initial_shape, gt_shape=None):
        r"""
        The image is first rescaled with respect to the reference landmarks,
        then a smoothing or Gaussian pyramid is computed and, finally,
        features are extracted from each pyramid level.
        """
        image.landmarks['initial_shape'] = initial_shape
        image = image.rescale_to_reference_shape(
            self.reference_shape,
            group='initial_shape',
            interpolator=self.interpolator)

        if gt_shape:
            image.landmarks['gt_shape'] = gt_shape

        if self.n_levels > 1:
            if self.scaled_levels:
                pyramid = image.gaussian_pyramid(n_levels=self.n_levels,
                                                 downscale=self.downscale)
            else:
                pyramid = image.smoothing_pyramid(n_levels=self.n_levels,
                                                  downscale=self.downscale)
            images = [compute_features(i, self.feature_type) for i in pyramid]
            images.reverse()
        else:
            images = [compute_features(image, self.feature_type)]

        return images
Example #3
    def _prepare_image(self, image, initial_shape, gt_shape=None):
        r"""
        The image is first rescaled with respect to the reference landmarks
        and then the Gaussian pyramid is computed. Depending on the
        pyramid_on_features flag, the pyramid is either applied to the
        feature image or features are extracted at each pyramidal level.

        Parameters
        ----------
        image: :class:`menpo.image.MaskedImage`
            The image to be fitted.
        initial_shape: :class:`menpo.shape.PointCloud`
            The initial shape from which the fitting will start.
        gt_shape: :class:`menpo.shape.PointCloud`, optional
            The original ground truth shape associated with the image.

            Default: None

        Returns
        -------
        images: list of :class:`menpo.image.masked.MaskedImage`
            List of images, each being the result of applying the pyramid.
        """
        # rescale image wrt the scale factor between reference_shape and
        # initial_shape
        image.landmarks['initial_shape'] = initial_shape
        image = image.rescale_to_reference_shape(
            self.reference_shape, group='initial_shape',
            interpolator=self.interpolator)

        # attach given ground truth shape
        if gt_shape:
            image.landmarks['gt_shape'] = gt_shape

        # apply pyramid
        if self.n_levels > 1:
            if self.pyramid_on_features:
                # compute features at highest level
                feature_image = compute_features(image, self.feature_type[0])

                # apply pyramid on feature image
                pyramid = feature_image.gaussian_pyramid(
                    n_levels=self.n_levels, downscale=self.downscale)

                # get rescaled feature images
                images = list(pyramid)
            else:
                # create pyramid on intensities image
                pyramid = image.gaussian_pyramid(
                    n_levels=self.n_levels, downscale=self.downscale)

                # compute features at each level
                images = [compute_features(
                    i, self.feature_type[self.n_levels - j - 1])
                    for j, i in enumerate(pyramid)]
            images.reverse()
        else:
            images = [compute_features(image, self.feature_type[0])]
        return images
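
The pyramid_on_features flag above selects between two strategies: compute features once at the highest resolution and downscale the resulting feature image, or downscale the intensities and compute features at every level. A NumPy-only sketch of that branching, where compute_features and pyramid are simplified stand-ins for the menpo calls:

import numpy as np

def compute_features(pixels, feature_type=None):
    # stand-in: stack the image with its horizontal gradient
    return np.dstack([pixels, np.gradient(pixels)[1]])

def pyramid(pixels, n_levels=3, downscale=2):
    for j in range(n_levels):
        yield pixels[::downscale ** j, ::downscale ** j]

image = np.random.rand(64, 64)
pyramid_on_features = True
if pyramid_on_features:
    # features once at the highest level, then downscale the feature image
    images = list(pyramid(compute_features(image)))
else:
    # downscale the intensities, then features at each level
    images = [compute_features(level) for level in pyramid(image)]
images.reverse()  # coarse-to-fine ordering
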
Example #4
    def response_image(self, image, group=None, label="all", level=-1):
        r"""
        Generates a response image, the result of applying the classifiers
        of a particular pyramidal level of the CLM to an image.

        Parameters
        -----------
        image: :map:`Image`
            The image.

        group : `string`, optional
            The key of the landmark set that should be used. If ``None``,
            and if there is only one set of landmarks, this set will be used.

        label : `string`, optional
            The label of the landmark manager that you wish to use. If no
            label is passed, the convex hull of all landmarks is used.

        level: `int`, optional
            The pyramidal level to be used.

        Returns
        -------
        image : :map:`Image`
            The response image.
        """
        # rescale image
        image = image.rescale_to_reference_shape(
            self.reference_shape, group=group, label=label)

        # apply pyramid
        if self.n_levels > 1:
            if self.pyramid_on_features:
                # compute features at highest level
                feature_image = compute_features(image, self.feature_type[0])

                # apply pyramid on feature image
                pyramid = feature_image.gaussian_pyramid(
                    n_levels=self.n_levels, downscale=self.downscale)

                # get rescaled feature images
                images = list(pyramid)
            else:
                # create pyramid on intensities image
                pyramid = image.gaussian_pyramid(
                    n_levels=self.n_levels, downscale=self.downscale)

                # compute features at each level
                images = [compute_features(
                    i, self.feature_type[self.n_levels - j - 1])
                    for j, i in enumerate(pyramid)]
            images.reverse()
        else:
            images = [compute_features(image, self.feature_type[0])]

        # initialize responses
        image = images[level]
        image_pixels = np.reshape(image.pixels, (-1, image.n_channels))
        response_data = np.zeros((image.shape[0], image.shape[1],
                                  self.n_classifiers_per_level[level]))
        # Compute responses
        for j, clf in enumerate(self.classifiers[level]):
            response_data[:, :, j] = np.reshape(clf(image_pixels), image.shape)
        return Image(image_data=response_data)
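
The last block of the method above flattens the feature image into per-pixel feature vectors, scores every pixel with each landmark's classifier, and reshapes each score vector back into an image-sized response map. A small NumPy sketch of that assembly step, with hypothetical linear classifiers in place of the trained ones:

import numpy as np

h, w, n_channels = 32, 32, 3
feature_pixels = np.random.rand(h, w, n_channels)

# hypothetical per-landmark classifiers: simple linear scoring functions
classifiers = [lambda X, wgt=np.random.rand(n_channels): X.dot(wgt)
               for _ in range(5)]

pixels_2d = feature_pixels.reshape(-1, n_channels)    # (h * w, n_channels)
response_data = np.zeros((h, w, len(classifiers)))
for j, clf in enumerate(classifiers):
    # one response map per landmark classifier
    response_data[:, :, j] = clf(pixels_2d).reshape(h, w)
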
Example #5
    def features(self, image, shape):
        r"""
        Method that extracts the features for the regression, which in this
        case are patch based.

        Parameters
        ----------
        image : :map:`MaskedImage`
            The current image.

        shape : :map:`PointCloud`
            The current shape.
        """
        # extract patches
        patches = extract_local_patches_fast(image, shape, self.patch_shape)

        features = np.zeros((shape.n_points, self._feature_patch_length))
        for j, patch in enumerate(patches):
            # build patch image
            patch_img = Image(patch, copy=False)
            # compute features
            features[j, ...] = compute_features(
                patch_img, self.regression_features).as_vector()

        return np.hstack((features.ravel(), 1))
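
The method above extracts a patch around every landmark, computes features on each patch, flattens everything into a single vector and appends a constant bias term of 1. A self-contained sketch of that layout, with plain NumPy arrays standing in for menpo images and feature extraction:

import numpy as np

image = np.random.rand(100, 100)
points = np.array([[20, 30], [50, 50], [70, 25]])    # hypothetical landmarks
patch_shape = (8, 8)

feature_rows = []
for r, c in points:
    patch = image[r:r + patch_shape[0], c:c + patch_shape[1]]
    # stand-in for compute_features(patch_img, ...).as_vector()
    feature_rows.append(patch.ravel())
features = np.vstack(feature_rows)

# flatten all patch features into one vector and append a bias term of 1
regression_features = np.hstack((features.ravel(), 1))
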
Example #6
    def _create_pyramid(cls, images, n_levels, downscale, pyramid_on_features,
                        feature_type, verbose=False):
        r"""
        Function that creates a list of Gaussian pyramid generators, one per
        image. The pyramid can be created either on the feature space or on
        the original (intensities) space.

        Parameters
        ----------
        images: list of :class:`menpo.image.Image`
            The set of landmarked images from which to build the AAM.
        n_levels: int
            The number of multi-resolution pyramidal levels to be used.
        downscale: float
            The downscale factor that will be used to create the different
            pyramidal levels.
        pyramid_on_features: boolean
            If True, the features are extracted at the highest level and the
            pyramid is created on the feature images.
            If False, the pyramid is created on the original (intensities)
            space.
        feature_type: list of size 1 with str or function/closure or None
            The feature type to be used in case pyramid_on_features is enabled.
        verbose: bool, Optional
            Flag that controls information and progress printing.

            Default: False

        Returns
        -------
        generator: list of generators
            The list of Gaussian pyramid generators, one per image.
        """
        if pyramid_on_features:
            # compute features at highest level
            feature_images = []
            for c, i in enumerate(images):
                if verbose:
                    print_dynamic('- Computing feature space: {}'.format(
                        progress_bar_str((c + 1.) / len(images),
                                         show_bar=False)))
                feature_images.append(compute_features(i, feature_type[0]))
            if verbose:
                print_dynamic('- Computing feature space: Done\n')

            # create pyramid on feature_images
            generator = [i.gaussian_pyramid(n_levels=n_levels,
                                            downscale=downscale)
                         for i in feature_images]
        else:
            # create pyramid on intensities images
            # features will be computed per level
            generator = [i.gaussian_pyramid(n_levels=n_levels,
                                            downscale=downscale)
                         for i in images]
        return generator
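
The returned list holds one pyramid generator per training image; the builder later advances every generator once per level so that all images stay in step. A toy illustration of that consumption pattern (fake_pyramid is a stand-in for image.gaussian_pyramid):

def fake_pyramid(img_id, n_levels=3):
    # stand-in generator: yields (image id, level) pairs, high to low
    for level in range(n_levels):
        yield (img_id, level)

generators = [fake_pyramid(i) for i in range(4)]   # one generator per image
for level in range(3):
    level_images = [next(g) for g in generators]   # all images at this level
    print(level, level_images)
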
Example #7
    def response_image(self, image, group=None, label='all', level=-1):
        r"""
        Generates a response image, the result of applying the classifiers
        of a particular pyramidal level of the CLM to an image.

        Parameters
        -----------
        image: :class:`menpo.image.base.Image`
            The image.

        group : string, Optional
            The key of the landmark set that should be used. If None,
            and if there is only one set of landmarks, this set will be used.

            Default: None

        label: string, Optional
            The label of the landmark manager that you wish to use. If no
            label is passed, the convex hull of all landmarks is used.

            Default: 'all'

        level: int, optional
            The pyramidal level to be used.

            Default: -1

        Returns
        -------
        image: :class:`menpo.image.base.Image`
            The response image.
        """
        image = image.rescale_to_reference_shape(self.reference_shape,
                                                 group=group, label=label)

        pyramid = image.gaussian_pyramid(n_levels=self.n_levels,
                                         downscale=self.downscale)
        images = [compute_features(i, self.feature_type)
                  for i in pyramid]
        images.reverse()

        image = images[level]
        image_pixels = np.reshape(image.pixels, (-1, image.n_channels))
        response_data = np.zeros((image.shape[0], image.shape[1],
                                  self.n_classifiers_per_level[level]))
        # Compute responses
        for j, clf in enumerate(self.classifiers[level]):
            response_data[:, :, j] = np.reshape(clf(image_pixels),
                                                image.shape)

        return Image(image_data=response_data)
Example #8
    def build(self, images, group=None, label='all'):
        r"""
        Builds a Multilevel Active Appearance Model from a list of
        landmarked images.

        Parameters
        ----------
        images: list of :class:`menpo.image.Image`
            The set of landmarked images from which to build the AAM.

        group : string, Optional
            The key of the landmark set that should be used. If None,
            and if there is only one set of landmarks, this set will be used.

            Default: None

        label: string, Optional
            The label of the landmark manager that you wish to use. If no
            label is passed, the convex hull of all landmarks is used.

            Default: 'all'

        Returns
        -------
        aam : :class:`menpo.fitmultiple.aam.builder.AAM`
            The AAM object
        """
        print('- Preprocessing')
        self.reference_shape, generator = self._preprocessing(
            images, group, label, self.diagonal_range, self.interpolator,
            self.scaled_levels, self.n_levels, self.downscale)

        print('- Building model pyramids')
        shape_models = []
        appearance_models = []
        # for each level
        for j in np.arange(self.n_levels):
            print(' - Level {}'.format(j))

            print('  - Computing feature space')
            images = [compute_features(next(g), self.feature_type)
                      for g in generator]
            # extract potentially rescaled shapes
            shapes = [i.landmarks[group][label].lms for i in images]

            if j == 0 or self.scaled_levels:
                print('  - Building shape model')
                if j != 0:
                    shapes = [Scale(1/self.downscale,
                                    n_dims=shapes[0].n_dims).apply(s)
                              for s in shapes]
                shape_model = self._build_shape_model(
                    shapes, self.max_shape_components)

                print('  - Building reference frame')
                reference_frame = self._build_reference_frame(
                    shape_model.mean)

            # add shape model to the list
            shape_models.append(shape_model)

            print('  - Computing transforms')
            transforms = [self.transform(reference_frame.landmarks['source'].lms,
                                         i.landmarks[group][label].lms)
                          for i in images]

            print('  - Warping images')
            images = [i.warp_to(reference_frame.mask, t,
                                interpolator=self.interpolator)
                      for i, t in zip(images, transforms)]

            for i in images:
                i.landmarks['source'] = reference_frame.landmarks['source']
                self._mask_image(i)

            print('  - Building appearance model')
            appearance_model = PCAModel(images)
            # trim appearance model if required
            if self.max_appearance_components is not None:
                appearance_model.trim_components(
                    self.max_appearance_components)

            # add appearance model to the list
            appearance_models.append(appearance_model)

        # reverse the list of shape and appearance models so that they are
        # ordered from lower to higher resolution
        shape_models.reverse()
        appearance_models.reverse()

        return self._build_aam(shape_models, appearance_models)
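
The Scale(1 / self.downscale, ...) step above uniformly divides the landmark coordinates by the downscale factor so that shapes correspond to a coarser pyramid level. A NumPy analogue of that per-level rescaling, with a plain array in place of a PointCloud:

import numpy as np

downscale = 2.0
points = np.array([[120.0, 80.0], [64.0, 32.0], [10.0, 200.0]])

# one shape per pyramid level: divide by downscale once per level, which is
# what Scale(1 / downscale, n_dims=2).apply(shape) does to the points
per_level_points = [points / downscale ** level for level in range(3)]
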
Example #9
 def __str__(self):
     out = "Supervised Descent Method\n" \
           " - Non-Parametric '{}' Regressor\n" \
           " - {} training images.\n".format(
           self._fitters[0].regressor.__name__, self._n_training_images)
     # small strings about number of channels, channels string and downscale
     down_str = []
     for j in range(self.n_levels):
         if j == self.n_levels - 1:
             down_str.append('(no downscale)')
         else:
             down_str.append('(downscale by {})'.format(
                 self.downscale**(self.n_levels - j - 1)))
     temp_img = Image(image_data=np.random.rand(40, 40))
     if self.pyramid_on_features:
         temp = compute_features(temp_img, self.feature_type[0])
         n_channels = [temp.n_channels] * self.n_levels
     else:
         n_channels = []
         for j in range(self.n_levels):
             temp = compute_features(temp_img, self.feature_type[j])
             n_channels.append(temp.n_channels)
     # string about features and channels
     if self.pyramid_on_features:
         if isinstance(self.feature_type[0], str):
             feat_str = "- Feature is {} with ".format(
                 self.feature_type[0])
         elif self.feature_type[0] is None:
             feat_str = "- No features extracted. "
         else:
             feat_str = "- Feature is {} with ".format(
                 self.feature_type[0].__name__)
         if n_channels[0] == 1:
             ch_str = ["channel"]
         else:
             ch_str = ["channels"]
     else:
         feat_str = []
         ch_str = []
         for j in range(self.n_levels):
             if isinstance(self.feature_type[j], str):
                 feat_str.append("- Feature is {} with ".format(
                     self.feature_type[j]))
             elif self.feature_type[j] is None:
                 feat_str.append("- No features extracted. ")
             else:
                 feat_str.append("- Feature is {} with ".format(
                     self.feature_type[j].__name__))
             if n_channels[j] == 1:
                 ch_str.append("channel")
             else:
                 ch_str.append("channels")
     if self.n_levels > 1:
         out = "{} - Gaussian pyramid with {} levels and downscale " \
               "factor of {}.\n".format(out, self.n_levels,
                                        self.downscale)
         if self.pyramid_on_features:
             out = "{}   - Pyramid was applied on feature space.\n   " \
                   "{}{} {} per image.\n".format(out, feat_str,
                                                 n_channels[0], ch_str[0])
         else:
             out = "{}   - Features were extracted at each pyramid " \
                   "level.\n".format(out)
             for i in range(self.n_levels - 1, -1, -1):
                 out = "{}   - Level {} {}: \n     {}{} {} per " \
                       "image.\n".format(
                       out, self.n_levels - i, down_str[i], feat_str[i],
                       n_channels[i], ch_str[i])
     else:
         if self.pyramid_on_features:
             feat_str = [feat_str]
         out = "{0} - No pyramid used:\n   {1}{2} {3} per image.\n".format(
               out, feat_str[0], n_channels[0], ch_str[0])
     return out
Example #10
    def train(self, images, group=None, label="all", **kwargs):
        r"""
        """
        print("- Computing reference shape")
        self.reference_shape = self._compute_reference_shape(images, group, label)

        print("- Normalizing object size")
        self._rescale_reference_shape()
        images = [
            i.rescale_to_reference_shape(self.reference_shape, group=group, label=label, interpolator=self.interpolator)
            for i in images
        ]

        print("- Generating multilevel scale space")
        if self.scaled_levels:
            # Gaussian pyramid
            generator = [i.gaussian_pyramid(n_levels=self.n_levels, downscale=self.downscale) for i in images]
        else:
            # Smoothing pyramid
            generator = [i.smoothing_pyramid(n_levels=self.n_levels, downscale=self.downscale) for i in images]

        print("- Generating multilevel feature space")
        images = []
        for _ in np.arange(self.n_levels):
            images.append([compute_features(next(g), self.feature_type) for g in generator])
        images.reverse()

        print("- Extracting ground truth shapes")
        gt_shapes = [[i.landmarks[group][label].lms for i in img] for img in images]

        print("- Building regressors")
        regressors = []
        # for each level
        for j, (level_images, level_gt_shapes) in enumerate(zip(images, gt_shapes)):
            print(" - Level {}".format(j))

            trainer = self._set_regressor_trainer(j)

            if j == 0:
                regressor = trainer.train(level_images, level_gt_shapes, **kwargs)
            else:
                regressor = trainer.train(level_images, level_gt_shapes, level_shapes, **kwargs)

            print(" - Generating next level data")

            level_shapes = trainer.perturb_shapes(gt_shapes[0])

            regressors.append(regressor)
            count = 0
            total = len(regressors) * len(images[0]) * len(level_shapes[0])
            for k, r in enumerate(regressors):

                test_images = images[k]
                test_gt_shapes = gt_shapes[k]

                fittings = []
                for (i, gt_s, level_s) in zip(test_images, test_gt_shapes, level_shapes):
                    fitting_sublist = []
                    for ls in level_s:
                        fitting = r.fit(i, ls)
                        fitting.gt_shape = gt_s
                        fitting_sublist.append(fitting)
                        count += 1

                    fittings.append(fitting_sublist)
                    print(" - {} % ".format(round(100 * (count + 1) / total)), end="\r")

                if self.scaled_levels:
                    level_shapes = [
                        [
                            Scale(self.downscale, n_dims=self.reference_shape.n_dims).apply(f.final_shape)
                            for f in fitting_sublist
                        ]
                        for fitting_sublist in fittings
                    ]
                else:
                    level_shapes = [[f.final_shape for f in fitting_sublist] for fitting_sublist in fittings]

            mean_error = np.mean(np.array([f.final_error for fitting_sublist in fittings for f in fitting_sublist]))
            print(" - Mean error = {}".format(mean_error))

        return self._build_supervised_descent_fitter(regressors)
Example #11
    def build(self, images, group=None, label='all', verbose=False):
        r"""
        Builds a Multilevel Active Appearance Model from a list of
        landmarked images.

        Parameters
        ----------
        images : list of :map:`MaskedImage`
            The set of landmarked images from which to build the AAM.

        group : `string`, optional
            The key of the landmark set that should be used. If ``None``,
            and if there is only one set of landmarks, this set will be used.

        label : `string`, optional
            The label of the landmark manager that you wish to use. If no
            label is passed, the convex hull of all landmarks is used.

        verbose : `boolean`, optional
            Flag that controls information and progress printing.

        Returns
        -------
        aam : :map:`AAM`
            The AAM object. Shape and appearance models are stored from lowest
            to highest level
        """
        # compute reference_shape and normalize images size
        self.reference_shape, normalized_images = \
            self._normalization_wrt_reference_shape(
                images, group, label, self.normalization_diagonal,
                self.interpolator, verbose=verbose)

        # create pyramid
        generators = self._create_pyramid(normalized_images, self.n_levels,
                                          self.downscale,
                                          self.pyramid_on_features,
                                          self.feature_type, verbose=verbose)

        # build the model at each pyramid level
        if verbose:
            if self.n_levels > 1:
                print_dynamic('- Building model for each of the {} pyramid '
                              'levels\n'.format(self.n_levels))
            else:
                print_dynamic('- Building model\n')

        shape_models = []
        appearance_models = []
        # for each pyramid level (high --> low)
        for j in range(self.n_levels):
            # since models are built from highest to lowest level, the
            # parameters in form of list need to use a reversed index
            rj = self.n_levels - j - 1

            if verbose:
                level_str = '  - '
                if self.n_levels > 1:
                    level_str = '  - Level {}: '.format(j + 1)

            # get images of current level
            feature_images = []
            if self.pyramid_on_features:
                # features are already computed, so just call generator
                for c, g in enumerate(generators):
                    if verbose:
                        print_dynamic('{}Rescaling feature space - {}'.format(
                            level_str,
                            progress_bar_str((c + 1.) / len(generators),
                                             show_bar=False)))
                    feature_images.append(next(g))
            else:
                # extract features of images returned from generator
                for c, g in enumerate(generators):
                    if verbose:
                        print_dynamic('{}Computing feature space - {}'.format(
                            level_str,
                            progress_bar_str((c + 1.) / len(generators),
                                             show_bar=False)))
                    feature_images.append(compute_features(
                        next(g), self.feature_type[rj]))

            # extract potentially rescaled shapes
            shapes = [i.landmarks[group][label].lms for i in feature_images]

            # define shapes that will be used for training
            if j == 0:
                original_shapes = shapes
                train_shapes = shapes
            else:
                if self.scaled_shape_models:
                    train_shapes = shapes
                else:
                    train_shapes = original_shapes

            # train shape model and find reference frame
            if verbose:
                print_dynamic('{}Building shape model'.format(level_str))
            shape_model = self._build_shape_model(
                train_shapes, self.max_shape_components[rj])
            reference_frame = self._build_reference_frame(shape_model.mean)

            # add shape model to the list
            shape_models.append(shape_model)

            # compute transforms
            if verbose:
                print_dynamic('{}Computing transforms'.format(level_str))
            transforms = [self.transform(reference_frame.landmarks['source'].lms,
                                         i.landmarks[group][label].lms)
                          for i in feature_images]

            # warp images to reference frame
            warped_images = []
            for c, (i, t) in enumerate(zip(feature_images, transforms)):
                if verbose:
                    print_dynamic('{}Warping images - {}'.format(
                        level_str,
                        progress_bar_str(float(c + 1) / len(feature_images),
                                         show_bar=False)))
                warped_images.append(i.warp_to(reference_frame.mask, t,
                                               interpolator=self.interpolator))

            # attach reference_frame to images' source shape
            for i in warped_images:
                i.landmarks['source'] = reference_frame.landmarks['source']

            # build appearance model
            if verbose:
                print_dynamic('{}Building appearance model'.format(level_str))
            appearance_model = PCAModel(warped_images)
            # trim appearance model if required
            if self.max_appearance_components[rj] is not None:
                appearance_model.trim_components(
                    self.max_appearance_components[rj])

            # add appearance model to the list
            appearance_models.append(appearance_model)

            if verbose:
                print_dynamic('{}Done\n'.format(level_str))

        # reverse the list of shape and appearance models so that they are
        # ordered from lower to higher resolution
        shape_models.reverse()
        appearance_models.reverse()
        n_training_images = len(images)

        return self._build_aam(shape_models, appearance_models,
                               n_training_images)
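
Because the levels are built from highest to lowest resolution while the per-level parameter lists (feature_type, max_shape_components, ...) are ordered from lowest to highest, the loop above uses the reversed index rj and finally reverses the built models. A compact illustration of that indexing convention (the feature names are hypothetical):

n_levels = 3
feature_type = ['igo', 'igo', 'hog']     # hypothetical, ordered low -> high

built = []
for j in range(n_levels):                # building order: high -> low
    rj = n_levels - j - 1                # index into the low -> high lists
    built.append('model(level={}, feature={})'.format(rj, feature_type[rj]))
built.reverse()                          # stored order: low -> high
print(built)
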
Example #12
 def __str__(self):
     out = "Supervised Descent Method\n" \
           " - Non-Parametric '{}' Regressor\n" \
           " - {} training images.\n".format(
           self._fitters[0].regressor.__name__, self._n_training_images)
     # small strings about number of channels, channels string and downscale
     down_str = []
     for j in range(self.n_levels):
         if j == self.n_levels - 1:
             down_str.append('(no downscale)')
         else:
             down_str.append('(downscale by {})'.format(
                 self.downscale**(self.n_levels - j - 1)))
     temp_img = Image(image_data=np.random.rand(40, 40))
     if self.pyramid_on_features:
         temp = compute_features(temp_img, self.feature_type[0])
         n_channels = [temp.n_channels] * self.n_levels
     else:
         n_channels = []
         for j in range(self.n_levels):
             temp = compute_features(temp_img, self.feature_type[j])
             n_channels.append(temp.n_channels)
     # string about features and channels
     if self.pyramid_on_features:
         if isinstance(self.feature_type[0], str):
             feat_str = "- Feature is {} with ".format(self.feature_type[0])
         elif self.feature_type[0] is None:
             feat_str = "- No features extracted. "
         else:
             feat_str = "- Feature is {} with ".format(
                 self.feature_type[0].__name__)
         if n_channels[0] == 1:
             ch_str = ["channel"]
         else:
             ch_str = ["channels"]
     else:
         feat_str = []
         ch_str = []
         for j in range(self.n_levels):
             if isinstance(self.feature_type[j], str):
                 feat_str.append("- Feature is {} with ".format(
                     self.feature_type[j]))
             elif self.feature_type[j] is None:
                 feat_str.append("- No features extracted. ")
             else:
                 feat_str.append("- Feature is {} with ".format(
                     self.feature_type[j].__name__))
             if n_channels[j] == 1:
                 ch_str.append("channel")
             else:
                 ch_str.append("channels")
     if self.n_levels > 1:
         out = "{} - Gaussian pyramid with {} levels and downscale " \
               "factor of {}.\n".format(out, self.n_levels,
                                        self.downscale)
         if self.pyramid_on_features:
             out = "{}   - Pyramid was applied on feature space.\n   " \
                   "{}{} {} per image.\n".format(out, feat_str,
                                                 n_channels[0], ch_str[0])
         else:
             out = "{}   - Features were extracted at each pyramid " \
                   "level.\n".format(out)
             for i in range(self.n_levels - 1, -1, -1):
                 out = "{}   - Level {} {}: \n     {}{} {} per " \
                       "image.\n".format(
                       out, self.n_levels - i, down_str[i], feat_str[i],
                       n_channels[i], ch_str[i])
     else:
         if self.pyramid_on_features:
             feat_str = [feat_str]
         out = "{0} - No pyramid used:\n   {1}{2} {3} per image.\n".format(
             out, feat_str[0], n_channels[0], ch_str[0])
     return out
Example #13
    def build(self, images, group=None, label='all'):
        r"""
        Builds a Multilevel Constrained Local Model from a list of
        landmarked images.

        Parameters
        ----------
        images: list of :class:`menpo.image.Image`
            The set of landmarked images from which to build the CLM.

        group : string, Optional
            The key of the landmark set that should be used. If None,
            and if there is only one set of landmarks, this set will be used.

            Default: None

        label: string, Optional
            The label of the landmark manager that you wish to use. If no
            label is passed, the convex hull of all landmarks is used.

            Default: 'all'

        Returns
        -------
        aam : :class:`menpo.fitmultiple.clm.builder.CLM`
            The CLM object
        """
        print('- Preprocessing')
        self.reference_shape, generator = self._preprocessing(
            images, group, label, self.diagonal_range, self.interpolator,
            self.scaled_levels, self.n_levels, self.downscale)

        print('- Building model pyramids')
        shape_models = []
        classifiers = []
        # for each level
        for j in np.arange(self.n_levels):
            print(' - Level {}'.format(j))

            print('  - Computing feature space')
            images = [compute_features(next(g), self.feature_type)
                      for g in generator]
            # extract potentially rescaled shapes
            shapes = [i.landmarks[group][label].lms for i in images]

            if j == 0 or self.scaled_levels:
                print('  - Building shape model')
                shape_model = self._build_shape_model(
                    shapes, self.max_shape_components)

            # add shape model to the list
            shape_models.append(shape_model)

            print('  - Building classifiers')
            sampling_grid = build_sampling_grid(self.patch_shape)
            n_points = shapes[0].n_points

            level_classifiers = []
            for k in range(n_points):

                print(' - {} % '.format(round(100*(k+1)/n_points)), end='\r')
                positive_labels = []
                negative_labels = []
                positive_samples = []
                negative_samples = []

                for i, s in zip(images, shapes):

                    max_x = i.shape[0] - 1
                    max_y = i.shape[1] - 1

                    point = (np.round(s.points[k, :])).astype(int)
                    patch_grid = sampling_grid + point[None, None, ...]
                    positive, negative = get_pos_neg_grid_positions(
                        patch_grid, positive_grid_size=(1, 1))

                    x = positive[:, 0]
                    y = positive[:, 1]
                    x[x > max_x] = max_x
                    y[y > max_y] = max_y
                    x[x < 0] = 0
                    y[y < 0] = 0

                    positive_sample = i.pixels[positive[:, 0],
                                               positive[:, 1], :]
                    positive_samples.append(positive_sample)
                    positive_labels.append(np.ones(positive_sample.shape[0]))

                    x = negative[:, 0]
                    y = negative[:, 1]
                    x[x > max_x] = max_x
                    y[y > max_y] = max_y
                    x[x < 0] = 0
                    y[y < 0] = 0

                    negative_sample = i.pixels[x, y, :]
                    negative_samples.append(negative_sample)
                    negative_labels.append(-np.ones(negative_sample.shape[0]))

                positive_samples = np.asanyarray(positive_samples)
                positive_samples = np.reshape(positive_samples,
                                              (-1, positive_samples.shape[-1]))
                positive_labels = np.asanyarray(positive_labels).flatten()

                negative_samples = np.asanyarray(negative_samples)
                negative_samples = np.reshape(negative_samples,
                                              (-1, negative_samples.shape[-1]))
                negative_labels = np.asanyarray(negative_labels).flatten()

                X = np.vstack((positive_samples, negative_samples))
                t = np.hstack((positive_labels, negative_labels))

                clf = classifier(X, t, self.classifier_type)
                level_classifiers.append(clf)

            # add level classifiers to the list
            classifiers.append(level_classifiers)

        # reverse the list of shape and appearance models so that they are
        # ordered from lower to higher resolution
        shape_models.reverse()
        classifiers.reverse()

        return CLM(shape_models, classifiers, self.patch_shape,
                   self.feature_type, self.reference_shape, self.downscale,
                   self.scaled_levels, self.interpolator)
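
The repeated x/y clamping above keeps the sampled grid positions inside the image bounds before indexing into the pixels. A compact NumPy equivalent using np.clip on hypothetical out-of-bounds positions (note that, unlike the in-place clamping above, this writes into copies):

import numpy as np

image = np.random.rand(100, 120, 3)
max_x, max_y = image.shape[0] - 1, image.shape[1] - 1

# hypothetical sampled (x, y) grid positions, some of them out of bounds
positions = np.array([[-3, 5], [50, 60], [120, 119], [99, 130]])

x = np.clip(positions[:, 0], 0, max_x)
y = np.clip(positions[:, 1], 0, max_y)
samples = image[x, y, :]                 # safe fancy indexing
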
Example #14
    def __str__(self):
        out = "{0} Fitter\n" \
              " - Gradient-Descent {1}\n" \
              " - Transform is {2}.\n" \
              " - {3} training images.\n".format(
              self.clm._str_title, self._fitters[0].algorithm,
              self._fitters[0].transform.__class__.__name__,
              self.clm.n_training_images)
        # small strings about number of channels, channels string and downscale
        down_str = []
        for j in range(self.n_levels):
            if j == self.n_levels - 1:
                down_str.append('(no downscale)')
            else:
                down_str.append('(downscale by {})'.format(
                    self.downscale**(self.n_levels - j - 1)))
        temp_img = Image(image_data=np.random.rand(50, 50))
        if self.pyramid_on_features:
            temp = compute_features(temp_img, self.feature_type[0])
            n_channels = [temp.n_channels] * self.n_levels
        else:
            n_channels = []
            for j in range(self.n_levels):
                temp = compute_features(temp_img, self.feature_type[j])
                n_channels.append(temp.n_channels)
        # string about features and channels
        if self.pyramid_on_features:
            if isinstance(self.feature_type[0], str):
                feat_str = "- Feature is {} with ".format(self.feature_type[0])
            elif self.feature_type[0] is None:
                feat_str = "- No features extracted. "
            else:
                feat_str = "- Feature is {} with ".format(
                    self.feature_type[0].__name__)
            if n_channels[0] == 1:
                ch_str = ["channel"]
            else:
                ch_str = ["channels"]
        else:
            feat_str = []
            ch_str = []
            for j in range(self.n_levels):
                if isinstance(self.feature_type[j], str):
                    feat_str.append("- Feature is {} with ".format(
                        self.feature_type[j]))
                elif self.feature_type[j] is None:
                    feat_str.append("- No features extracted. ")
                else:
                    feat_str.append("- Feature is {} with ".format(
                        self.feature_type[j].__name__))
                if n_channels[j] == 1:
                    ch_str.append("channel")
                else:
                    ch_str.append("channels")
        if self.n_levels > 1:
            if self.clm.scaled_shape_models:
                out = "{} - Gaussian pyramid with {} levels and downscale " \
                      "factor of {}.\n   - Each level has a scaled shape " \
                      "model (reference frame).\n   - Patch size is {}W x " \
                      "{}H.\n".format(out, self.n_levels, self.downscale,
                                      self.clm.patch_shape[1],
                                      self.clm.patch_shape[0])

            else:
                out = "{} - Gaussian pyramid with {} levels and downscale " \
                      "factor of {}:\n   - Shape models (reference frames) " \
                      "are not scaled.\n   - Patch size is {}W x " \
                      "{}H.\n".format(out, self.n_levels, self.downscale,
                                      self.clm.patch_shape[1],
                                      self.clm.patch_shape[0])
            if self.pyramid_on_features:
                out = "{}   - Pyramid was applied on feature space.\n   " \
                      "{}{} {} per image.\n".format(out, feat_str,
                                                    n_channels[0], ch_str[0])
            else:
                out = "{}   - Features were extracted at each pyramid " \
                      "level.\n".format(out)
            for i in range(self.n_levels - 1, -1, -1):
                out = "{}   - Level {} {}: \n".format(out, self.n_levels - i,
                                                      down_str[i])
                if not self.pyramid_on_features:
                    out = "{}     {}{} {} per image.\n".format(
                        out, feat_str[i], n_channels[i], ch_str[i])
                out = "{0}     - {1} motion components\n     - {2} {3} " \
                      "classifiers.\n".format(
                      out, self._fitters[i].transform.n_parameters,
                      len(self._fitters[i].classifiers),
                      self._fitters[i].classifiers[0].__name__)
        else:
            if self.pyramid_on_features:
                feat_str = [feat_str]
            out = "{0} - No pyramid used:\n   {1}{2} {3} per image.\n" \
                  "   - {4} motion components\n   - {5} {6} " \
                  "classifiers.".format(
                  out, feat_str[0], n_channels[0], ch_str[0],
                  self._fitters[0].transform.n_parameters,
                  len(self._fitters[0].classifiers),
                  self._fitters[0].classifiers[0].__name__)
        return out
Example #15
    def build(self, images, group=None, label=None, verbose=False):
        r"""
        Builds a Multilevel Active Appearance Model from a list of
        landmarked images.

        Parameters
        ----------
        images : list of :map:`MaskedImage`
            The set of landmarked images from which to build the AAM.

        group : `string`, optional
            The key of the landmark set that should be used. If ``None``,
            and if there is only one set of landmarks, this set will be used.

        label : `string`, optional
            The label of the landmark manager that you wish to use. If no
            label is passed, the convex hull of all landmarks is used.

        verbose : `boolean`, optional
            Flag that controls information and progress printing.

        Returns
        -------
        aam : :map:`AAM`
            The AAM object. Shape and appearance models are stored from lowest
            to highest level
        """
        # compute reference_shape and normalize images size
        self.reference_shape, normalized_images = \
            self._normalization_wrt_reference_shape(
                images, group, label, self.normalization_diagonal,
                self.interpolator, verbose=verbose)

        # create pyramid
        generators = self._create_pyramid(normalized_images,
                                          self.n_levels,
                                          self.downscale,
                                          self.pyramid_on_features,
                                          self.feature_type,
                                          verbose=verbose)

        # build the model at each pyramid level
        if verbose:
            if self.n_levels > 1:
                print_dynamic('- Building model for each of the {} pyramid '
                              'levels\n'.format(self.n_levels))
            else:
                print_dynamic('- Building model\n')

        shape_models = []
        appearance_models = []
        # for each pyramid level (high --> low)
        for j in range(self.n_levels):
            # since models are built from highest to lowest level, the
            # parameters in form of list need to use a reversed index
            rj = self.n_levels - j - 1

            if verbose:
                level_str = '  - '
                if self.n_levels > 1:
                    level_str = '  - Level {}: '.format(j + 1)

            # get images of current level
            feature_images = []
            if self.pyramid_on_features:
                # features are already computed, so just call generator
                for c, g in enumerate(generators):
                    if verbose:
                        print_dynamic('{}Rescaling feature space - {}'.format(
                            level_str,
                            progress_bar_str((c + 1.) / len(generators),
                                             show_bar=False)))
                    feature_images.append(next(g))
            else:
                # extract features of images returned from generator
                for c, g in enumerate(generators):
                    if verbose:
                        print_dynamic('{}Computing feature space - {}'.format(
                            level_str,
                            progress_bar_str((c + 1.) / len(generators),
                                             show_bar=False)))
                    feature_images.append(
                        compute_features(next(g), self.feature_type[rj]))

            # extract potentially rescaled shapes
            shapes = [i.landmarks[group][label] for i in feature_images]

            # define shapes that will be used for training
            if j == 0:
                original_shapes = shapes
                train_shapes = shapes
            else:
                if self.scaled_shape_models:
                    train_shapes = shapes
                else:
                    train_shapes = original_shapes

            # train shape model and find reference frame
            if verbose:
                print_dynamic('{}Building shape model'.format(level_str))
            shape_model = self._build_shape_model(
                train_shapes, self.max_shape_components[rj])
            reference_frame = self._build_reference_frame(shape_model.mean)

            # add shape model to the list
            shape_models.append(shape_model)

            # compute transforms
            if verbose:
                print_dynamic('{}Computing transforms'.format(level_str))
            transforms = [
                self.transform(reference_frame.landmarks['source'].lms,
                               i.landmarks[group][label])
                for i in feature_images
            ]

            # warp images to reference frame
            warped_images = []
            for c, (i, t) in enumerate(zip(feature_images, transforms)):
                if verbose:
                    print_dynamic('{}Warping images - {}'.format(
                        level_str,
                        progress_bar_str(float(c + 1) / len(feature_images),
                                         show_bar=False)))
                warped_images.append(
                    i.warp_to(reference_frame.mask,
                              t,
                              interpolator=self.interpolator))

            # attach reference_frame to images' source shape
            for i in warped_images:
                i.landmarks['source'] = reference_frame.landmarks['source']

            # build appearance model
            if verbose:
                print_dynamic('{}Building appearance model'.format(level_str))
            appearance_model = PCAModel(warped_images)
            # trim appearance model if required
            if self.max_appearance_components[rj] is not None:
                appearance_model.trim_components(
                    self.max_appearance_components[rj])

            # add appearance model to the list
            appearance_models.append(appearance_model)

            if verbose:
                print_dynamic('{}Done\n'.format(level_str))

        # reverse the list of shape and appearance models so that they are
        # ordered from lower to higher resolution
        shape_models.reverse()
        appearance_models.reverse()
        n_training_images = len(images)

        return self._build_aam(shape_models, appearance_models,
                               n_training_images)
Example #16
    def build(self, images, group=None, label=None, verbose=False):
        r"""
        Builds a Multilevel Constrained Local Model from a list of
        landmarked images.

        Parameters
        ----------
        images : list of :map:`Image`
            The set of landmarked images from which to build the CLM.
        group : string, Optional
            The key of the landmark set that should be used. If ``None``,
            and if there is only one set of landmarks, this set will be used.
        label : `string`, optional
            The label of the landmark manager that you wish to use. If
            ``None``, the convex hull of all landmarks is used.
        verbose : `boolean`, optional
            Flag that controls information and progress printing.

        Returns
        -------
        clm : :map:`CLM`
            The CLM object
        """
        # compute reference_shape and normalize images size
        self.reference_shape, normalized_images = \
            self._normalization_wrt_reference_shape(
                images, group, label, self.normalization_diagonal,
                self.interpolator, verbose=verbose)

        # create pyramid
        generators = self._create_pyramid(normalized_images,
                                          self.n_levels,
                                          self.downscale,
                                          self.pyramid_on_features,
                                          self.feature_type,
                                          verbose=verbose)

        # build the model at each pyramid level
        if verbose:
            if self.n_levels > 1:
                print_dynamic('- Building model for each of the {} pyramid '
                              'levels\n'.format(self.n_levels))
            else:
                print_dynamic('- Building model\n')

        shape_models = []
        classifiers = []
        # for each pyramid level (high --> low)
        for j in range(self.n_levels):
            # since models are built from highest to lowest level, the
            # parameters of type list need to use a reversed index
            rj = self.n_levels - j - 1

            if verbose:
                level_str = '  - '
                if self.n_levels > 1:
                    level_str = '  - Level {}: '.format(j + 1)

            # get images of current level
            feature_images = []
            if self.pyramid_on_features:
                # features are already computed, so just call generator
                for c, g in enumerate(generators):
                    if verbose:
                        print_dynamic('{}Rescaling feature space - {}'.format(
                            level_str,
                            progress_bar_str((c + 1.) / len(generators),
                                             show_bar=False)))
                    feature_images.append(next(g))
            else:
                # extract features of images returned from generator
                for c, g in enumerate(generators):
                    if verbose:
                        print_dynamic('{}Computing feature space - {}'.format(
                            level_str,
                            progress_bar_str((c + 1.) / len(generators),
                                             show_bar=False)))
                    feature_images.append(
                        compute_features(next(g), self.feature_type[rj]))

            # extract potentially rescaled shapes
            shapes = [i.landmarks[group][label] for i in feature_images]

            # define shapes that will be used for training
            if j == 0:
                original_shapes = shapes
                train_shapes = shapes
            else:
                if self.scaled_shape_models:
                    train_shapes = shapes
                else:
                    train_shapes = original_shapes

            # train shape model and find reference frame
            if verbose:
                print_dynamic('{}Building shape model'.format(level_str))
            shape_model = self._build_shape_model(
                train_shapes, self.max_shape_components[rj])

            # add shape model to the list
            shape_models.append(shape_model)

            # build classifiers
            sampling_grid = build_sampling_grid(self.patch_shape)
            n_points = shapes[0].n_points
            level_classifiers = []
            for k in range(n_points):
                if verbose:
                    print_dynamic('{}Building classifiers - {}'.format(
                        level_str,
                        progress_bar_str((k + 1.) / n_points, show_bar=False)))

                positive_labels = []
                negative_labels = []
                positive_samples = []
                negative_samples = []

                for i, s in zip(feature_images, shapes):

                    max_x = i.shape[0] - 1
                    max_y = i.shape[1] - 1

                    point = (np.round(s.points[k, :])).astype(int)
                    patch_grid = sampling_grid + point[None, None, ...]
                    positive, negative = get_pos_neg_grid_positions(
                        patch_grid, positive_grid_size=(1, 1))

                    x = positive[:, 0]
                    y = positive[:, 1]
                    x[x > max_x] = max_x
                    y[y > max_y] = max_y
                    x[x < 0] = 0
                    y[y < 0] = 0

                    positive_sample = i.pixels[positive[:, 0],
                                               positive[:, 1], :]
                    positive_samples.append(positive_sample)
                    positive_labels.append(np.ones(positive_sample.shape[0]))

                    x = negative[:, 0]
                    y = negative[:, 1]
                    x[x > max_x] = max_x
                    y[y > max_y] = max_y
                    x[x < 0] = 0
                    y[y < 0] = 0

                    negative_sample = i.pixels[x, y, :]
                    negative_samples.append(negative_sample)
                    negative_labels.append(-np.ones(negative_sample.shape[0]))

                positive_samples = np.asanyarray(positive_samples)
                positive_samples = np.reshape(positive_samples,
                                              (-1, positive_samples.shape[-1]))
                positive_labels = np.asanyarray(positive_labels).flatten()

                negative_samples = np.asanyarray(negative_samples)
                negative_samples = np.reshape(negative_samples,
                                              (-1, negative_samples.shape[-1]))
                negative_labels = np.asanyarray(negative_labels).flatten()

                X = np.vstack((positive_samples, negative_samples))
                t = np.hstack((positive_labels, negative_labels))

                clf = classifier(X, t, self.classifier_type[rj])
                level_classifiers.append(clf)

            # add level classifiers to the list
            classifiers.append(level_classifiers)

            if verbose:
                print_dynamic('{}Done\n'.format(level_str))

        # reverse the list of shape and appearance models so that they are
        # ordered from lower to higher resolution
        shape_models.reverse()
        classifiers.reverse()
        n_training_images = len(images)

        return CLM(shape_models, classifiers, n_training_images,
                   self.patch_shape, self.feature_type, self.reference_shape,
                   self.downscale, self.scaled_shape_models,
                   self.pyramid_on_features, self.interpolator)
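
The per-landmark classifier training above clips each patch's sampling coordinates to the image bounds before reading pixels; the in-place `x[x > max_x] = max_x` assignments work because `positive[:, 0]` and `negative[:, 0]` are views into the position arrays. Below is a minimal standalone sketch of that clamping step using `np.clip`; the helper name and the shapes are illustrative and not part of the library.

import numpy as np

def clamp_positions(positions, image_shape):
    """Clamp (row, col) sampling positions so they lie inside the image.

    positions : (n, 2) integer array of (row, col) coordinates.
    image_shape : (height, width) of the image being sampled.
    """
    clamped = positions.copy()
    clamped[:, 0] = np.clip(clamped[:, 0], 0, image_shape[0] - 1)
    clamped[:, 1] = np.clip(clamped[:, 1], 0, image_shape[1] - 1)
    return clamped

# e.g. a sampling grid centred on a landmark near the image border
grid = np.array([[-1, -1], [0, 0], [120, 95]])
print(clamp_positions(grid, (100, 80)))   # -> [[0, 0], [0, 0], [99, 79]]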
Exemple #18
0
 def _set_up(self):
     # work out feature length per patch
     patch_img = Image.blank(self.patch_shape, fill=0)
     self._feature_patch_length = compute_features(
         patch_img, self.regression_features).n_parameters
Exemple #19
0
    def response_image(self, image, group=None, label=None, level=-1):
        r"""
        Generates the response image that results from applying the
        classifiers of a particular pyramidal level of the CLM to an image.

        Parameters
        ----------
        image: :map:`Image`
            The image.
        group : `string`, optional
            The key of the landmark set that should be used. If ``None``,
            and if there is only one set of landmarks, this set will be used.
        label : `string`, optional
            The label of the landmark manager that you wish to use. If no
            label is passed, the convex hull of all landmarks is used.
        level: `int`, optional
            The pyramidal level to be used.

        Returns
        -------
        image : :map:`Image`
            The response image.
        """
        # rescale image
        image = image.rescale_to_reference_shape(self.reference_shape,
                                                 group=group,
                                                 label=label)

        # apply pyramid
        if self.n_levels > 1:
            if self.pyramid_on_features:
                # compute features at highest level
                feature_image = compute_features(image, self.feature_type[0])

                # apply pyramid on feature image
                pyramid = feature_image.gaussian_pyramid(
                    n_levels=self.n_levels, downscale=self.downscale)

                # get rescaled feature images
                images = list(pyramid)
            else:
                # create pyramid on intensities image
                pyramid = image.gaussian_pyramid(n_levels=self.n_levels,
                                                 downscale=self.downscale)

                # compute features at each level
                images = [
                    compute_features(i,
                                     self.feature_type[self.n_levels - j - 1])
                    for j, i in enumerate(pyramid)
                ]
            images.reverse()
        else:
            images = [compute_features(image, self.feature_type[0])]

        # initialize responses
        image = images[level]
        image_pixels = np.reshape(image.pixels, (-1, image.n_channels))
        response_data = np.zeros((image.shape[0], image.shape[1],
                                  self.n_classifiers_per_level[level]))
        # Compute responses
        for j, clf in enumerate(self.classifiers[level]):
            response_data[:, :, j] = np.reshape(clf(image_pixels), image.shape)
        return Image(image_data=response_data)
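
A hedged usage sketch for the method above: `clm` stands for an already trained CLM instance and `img` for a landmarked menpo image, both assumed to exist in the caller's scope, and the group name 'PTS' is only an example.

# Assumed: `clm` is a trained CLM and `img` a landmarked image.
response = clm.response_image(img, group='PTS', level=-1)
print(response.shape, response.n_channels)   # one channel per classifier at that level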
Exemple #20
0
    def build(self, images, group=None, label="all", verbose=False):
        r"""
        Builds a Multilevel Constrained Local Model from a list of
        landmarked images.

        Parameters
        ----------
        images : list of :map:`Image`
            The set of landmarked images from which to build the CLM.

        group : `string`, optional
            The key of the landmark set that should be used. If ``None``,
            and if there is only one set of landmarks, this set will be used.

        label : `string`, optional
            The label of the landmark manager that you wish to use. If
            ``None``, the convex hull of all landmarks is used.

        verbose : `boolean`, optional
            Flag that controls information and progress printing.

        Returns
        -------
        clm : :map:`CLM`
            The CLM object
        """
        # compute reference_shape and normalize images size
        self.reference_shape, normalized_images = self._normalization_wrt_reference_shape(
            images, group, label, self.normalization_diagonal, self.interpolator, verbose=verbose
        )

        # create pyramid
        generators = self._create_pyramid(
            normalized_images,
            self.n_levels,
            self.downscale,
            self.pyramid_on_features,
            self.feature_type,
            verbose=verbose,
        )

        # build the model at each pyramid level
        if verbose:
            if self.n_levels > 1:
                print_dynamic("- Building model for each of the {} pyramid " "levels\n".format(self.n_levels))
            else:
                print_dynamic("- Building model\n")

        shape_models = []
        classifiers = []
        # for each pyramid level (high --> low)
        for j in range(self.n_levels):
            # since models are built from highest to lowest level, the
            # parameters of type list need to use a reversed index
            rj = self.n_levels - j - 1

            if verbose:
                level_str = "  - "
                if self.n_levels > 1:
                    level_str = "  - Level {}: ".format(j + 1)

            # get images of current level
            feature_images = []
            if self.pyramid_on_features:
                # features are already computed, so just call generator
                for c, g in enumerate(generators):
                    if verbose:
                        print_dynamic(
                            "{}Rescaling feature space - {}".format(
                                level_str, progress_bar_str((c + 1.0) / len(generators), show_bar=False)
                            )
                        )
                    feature_images.append(next(g))
            else:
                # extract features of images returned from generator
                for c, g in enumerate(generators):
                    if verbose:
                        print_dynamic(
                            "{}Computing feature space - {}".format(
                                level_str, progress_bar_str((c + 1.0) / len(generators), show_bar=False)
                            )
                        )
                    feature_images.append(compute_features(next(g), self.feature_type[rj]))

            # extract potentially rescaled shapes
            shapes = [i.landmarks[group][label].lms for i in feature_images]

            # define shapes that will be used for training
            if j == 0:
                original_shapes = shapes
                train_shapes = shapes
            else:
                if self.scaled_shape_models:
                    train_shapes = shapes
                else:
                    train_shapes = original_shapes

            # train shape model and find reference frame
            if verbose:
                print_dynamic("{}Building shape model".format(level_str))
            shape_model = self._build_shape_model(train_shapes, self.max_shape_components[rj])

            # add shape model to the list
            shape_models.append(shape_model)

            # build classifiers
            sampling_grid = build_sampling_grid(self.patch_shape)
            n_points = shapes[0].n_points
            level_classifiers = []
            for k in range(n_points):
                if verbose:
                    print_dynamic(
                        "{}Building classifiers - {}".format(
                            level_str, progress_bar_str((k + 1.0) / n_points, show_bar=False)
                        )
                    )

                positive_labels = []
                negative_labels = []
                positive_samples = []
                negative_samples = []

                for i, s in zip(feature_images, shapes):

                    max_x = i.shape[0] - 1
                    max_y = i.shape[1] - 1

                    point = (np.round(s.points[k, :])).astype(int)
                    patch_grid = sampling_grid + point[None, None, ...]
                    positive, negative = get_pos_neg_grid_positions(patch_grid, positive_grid_size=(1, 1))

                    x = positive[:, 0]
                    y = positive[:, 1]
                    x[x > max_x] = max_x
                    y[y > max_y] = max_y
                    x[x < 0] = 0
                    y[y < 0] = 0

                    positive_sample = i.pixels[positive[:, 0], positive[:, 1], :]
                    positive_samples.append(positive_sample)
                    positive_labels.append(np.ones(positive_sample.shape[0]))

                    x = negative[:, 0]
                    y = negative[:, 1]
                    x[x > max_x] = max_x
                    y[y > max_y] = max_y
                    x[x < 0] = 0
                    y[y < 0] = 0

                    negative_sample = i.pixels[x, y, :]
                    negative_samples.append(negative_sample)
                    negative_labels.append(-np.ones(negative_sample.shape[0]))

                positive_samples = np.asanyarray(positive_samples)
                positive_samples = np.reshape(positive_samples, (-1, positive_samples.shape[-1]))
                positive_labels = np.asanyarray(positive_labels).flatten()

                negative_samples = np.asanyarray(negative_samples)
                negative_samples = np.reshape(negative_samples, (-1, negative_samples.shape[-1]))
                negative_labels = np.asanyarray(negative_labels).flatten()

                X = np.vstack((positive_samples, negative_samples))
                t = np.hstack((positive_labels, negative_labels))

                clf = classifier(X, t, self.classifier_type[rj])
                level_classifiers.append(clf)

            # add level classifiers to the list
            classifiers.append(level_classifiers)

            if verbose:
                print_dynamic("{}Done\n".format(level_str))

        # reverse the list of shape and appearance models so that they are
        # ordered from lower to higher resolution
        shape_models.reverse()
        classifiers.reverse()
        n_training_images = len(images)

        return CLM(
            shape_models,
            classifiers,
            n_training_images,
            self.patch_shape,
            self.feature_type,
            self.reference_shape,
            self.downscale,
            self.scaled_shape_models,
            self.pyramid_on_features,
            self.interpolator,
        )
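
For context, a minimal usage sketch of the builder above. `CLMBuilder` and `training_images` are assumed names, and the constructor arguments differ between library versions, so treat this as illustrative rather than a definitive API.

# Assumed names: CLMBuilder, training_images (list of landmarked images).
builder = CLMBuilder(n_levels=3, downscale=2, patch_shape=(5, 5))
clm = builder.build(training_images, group='PTS', verbose=True)
print(clm)   # prints a multilevel summary similar to the __str__ methods below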
Exemple #21
0
    def __str__(self):
        out = "{0} Fitter\n" \
              " - Gradient-Descent {1}\n" \
              " - Transform is {2}.\n" \
              " - {3} training images.\n".format(
              self.clm._str_title, self._fitters[0].algorithm,
              self._fitters[0].transform.__class__.__name__,
              self.clm.n_training_images)
        # small strings about number of channels, channels string and downscale
        down_str = []
        for j in range(self.n_levels):
            if j == self.n_levels - 1:
                down_str.append('(no downscale)')
            else:
                down_str.append('(downscale by {})'.format(
                    self.downscale**(self.n_levels - j - 1)))
        temp_img = Image(image_data=np.random.rand(50, 50))
        if self.pyramid_on_features:
            temp = compute_features(temp_img, self.feature_type[0])
            n_channels = [temp.n_channels] * self.n_levels
        else:
            n_channels = []
            for j in range(self.n_levels):
                temp = compute_features(temp_img, self.feature_type[j])
                n_channels.append(temp.n_channels)
        # string about features and channels
        if self.pyramid_on_features:
            if isinstance(self.feature_type[0], str):
                feat_str = "- Feature is {} with ".format(
                    self.feature_type[0])
            elif self.feature_type[0] is None:
                feat_str = "- No features extracted. "
            else:
                feat_str = "- Feature is {} with ".format(
                    self.feature_type[0].__name__)
            if n_channels[0] == 1:
                ch_str = ["channel"]
            else:
                ch_str = ["channels"]
        else:
            feat_str = []
            ch_str = []
            for j in range(self.n_levels):
                if isinstance(self.feature_type[j], str):
                    feat_str.append("- Feature is {} with ".format(
                        self.feature_type[j]))
                elif self.feature_type[j] is None:
                    feat_str.append("- No features extracted. ")
                else:
                    feat_str.append("- Feature is {} with ".format(
                        self.feature_type[j].__name__))
                if n_channels[j] == 1:
                    ch_str.append("channel")
                else:
                    ch_str.append("channels")
        if self.n_levels > 1:
            if self.clm.scaled_shape_models:
                out = "{} - Gaussian pyramid with {} levels and downscale " \
                      "factor of {}.\n   - Each level has a scaled shape " \
                      "model (reference frame).\n   - Patch size is {}W x " \
                      "{}H.\n".format(out, self.n_levels, self.downscale,
                                      self.clm.patch_shape[1],
                                      self.clm.patch_shape[0])

            else:
                out = "{} - Gaussian pyramid with {} levels and downscale " \
                      "factor of {}:\n   - Shape models (reference frames) " \
                      "are not scaled.\n   - Patch size is {}W x " \
                      "{}H.\n".format(out, self.n_levels, self.downscale,
                                      self.clm.patch_shape[1],
                                      self.clm.patch_shape[0])
            if self.pyramid_on_features:
                out = "{}   - Pyramid was applied on feature space.\n   " \
                      "{}{} {} per image.\n".format(out, feat_str,
                                                    n_channels[0], ch_str[0])
            else:
                out = "{}   - Features were extracted at each pyramid " \
                      "level.\n".format(out)
            for i in range(self.n_levels - 1, -1, -1):
                out = "{}   - Level {} {}: \n".format(out, self.n_levels - i,
                                                      down_str[i])
                if not self.pyramid_on_features:
                    out = "{}     {}{} {} per image.\n".format(
                        out, feat_str[i], n_channels[i], ch_str[i])
                out = "{0}     - {1} motion components\n     - {2} {3} " \
                      "classifiers.\n".format(
                      out, self._fitters[i].transform.n_parameters,
                      len(self._fitters[i].classifiers),
                      self._fitters[i].classifiers[0].__name__)
        else:
            if self.pyramid_on_features:
                feat_str = [feat_str]
            out = "{0} - No pyramid used:\n   {1}{2} {3} per image.\n" \
                  "   - {4} motion components\n   - {5} {6} " \
                  "classifiers.".format(
                  out, feat_str[0], n_channels[0], ch_str[0],
                  out, self._fitters[0].transform.n_parameters,
                  len(self._fitters[0].classifiers),
                  self._fitters[0].classifiers[0].__name__)
        return out
Exemple #22
0
    def __str__(self):
        out = "{}\n - {} training images.\n".format(self._str_title, self.n_training_images)
        # small strings about number of channels, channels string and downscale
        down_str = []
        for j in range(self.n_levels):
            if j == self.n_levels - 1:
                down_str.append("(no downscale)")
            else:
                down_str.append("(downscale by {})".format(self.downscale ** (self.n_levels - j - 1)))
        temp_img = Image(image_data=np.random.rand(50, 50))
        if self.pyramid_on_features:
            temp = compute_features(temp_img, self.feature_type[0])
            n_channels = [temp.n_channels] * self.n_levels
        else:
            n_channels = []
            for j in range(self.n_levels):
                temp = compute_features(temp_img, self.feature_type[j])
                n_channels.append(temp.n_channels)
        # string about features and channels
        if self.pyramid_on_features:
            if isinstance(self.feature_type[0], str):
                feat_str = "- Feature is {} with ".format(self.feature_type[0])
            elif self.feature_type[0] is None:
                feat_str = "- No features extracted. "
            else:
                feat_str = "- Feature is {} with ".format(self.feature_type[0].func_name)
            if n_channels[0] == 1:
                ch_str = ["channel"]
            else:
                ch_str = ["channels"]
        else:
            feat_str = []
            ch_str = []
            for j in range(self.n_levels):
                if isinstance(self.feature_type[j], str):
                    feat_str.append("- Feature is {} with ".format(self.feature_type[j]))
                elif self.feature_type[j] is None:
                    feat_str.append("- No features extracted. ")
                else:
                    feat_str.append("- Feature is {} with ".format(self.feature_type[j].func_name))
                if n_channels[j] == 1:
                    ch_str.append("channel")
                else:
                    ch_str.append("channels")
        if self.n_levels > 1:
            if self.scaled_shape_models:
                out = (
                    "{} - Gaussian pyramid with {} levels and downscale "
                    "factor of {}.\n   - Each level has a scaled shape "
                    "model (reference frame).\n   - Patch size is {}W x "
                    "{}H.\n".format(out, self.n_levels, self.downscale, self.patch_shape[1], self.patch_shape[0])
                )

            else:
                out = (
                    "{} - Gaussian pyramid with {} levels and downscale "
                    "factor of {}:\n   - Shape models (reference frames) "
                    "are not scaled.\n   - Patch size is {}W x "
                    "{}H.\n".format(out, self.n_levels, self.downscale, self.patch_shape[1], self.patch_shape[0])
                )
            if self.pyramid_on_features:
                out = "{}   - Pyramid was applied on feature space.\n   " "{}{} {} per image.\n".format(
                    out, feat_str, n_channels[0], ch_str[0]
                )
            else:
                out = "{}   - Features were extracted at each pyramid " "level.\n".format(out)
            for i in range(self.n_levels - 1, -1, -1):
                out = "{}   - Level {} {}: \n".format(out, self.n_levels - i, down_str[i])
                if not self.pyramid_on_features:
                    out = "{}     {}{} {} per image.\n".format(out, feat_str[i], n_channels[i], ch_str[i])
                out = "{0}     - {1} shape components ({2:.2f}% of " "variance)\n     - {3} {4} classifiers.\n".format(
                    out,
                    self.shape_models[i].n_components,
                    self.shape_models[i].variance_ratio * 100,
                    self.n_classifiers_per_level[i],
                    self.classifiers[i][0].__name__,
                )
        else:
            if self.pyramid_on_features:
                feat_str = [feat_str]
            out = (
                "{0} - No pyramid used:\n   {1}{2} {3} per image.\n"
                "   - {4} shape components ({5:.2f}% of "
                "variance)\n   - {6} {7} classifiers.".format(
                    out,
                    feat_str[0],
                    n_channels[0],
                    ch_str[0],
                    self.shape_models[0].n_components,
                    self.shape_models[0].variance_ratio * 100,
                    self.n_classifiers_per_level[0],
                    self.classifiers[0][0].__name__,
                )
            )
        return out
Exemple #23
0
 def features(self, image, shape):
     patches = extract_local_patches(image, shape, self.sampling_grid)
     features = [compute_features(p, self.regression_features).pixels.ravel()
                 for p in patches]
     return np.hstack((np.asarray(features).ravel(), 1))
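
The method above flattens the per-patch feature vectors into one long vector and appends a constant 1, presumably so the linear regressor can learn an intercept. A small sketch of that bookkeeping with illustrative sizes (68 patches of 128 features each; both numbers are made up for the example):

import numpy as np

patch_features = [np.random.rand(128) for _ in range(68)]   # illustrative sizes
f = np.hstack((np.asarray(patch_features).ravel(), 1))
assert f.shape == (68 * 128 + 1,)   # flattened features plus the trailing bias term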
Exemple #24
0
    def train(self, images, group=None, label='all', **kwargs):
        r"""
        """
        print('- Computing reference shape')
        self.reference_shape = self._compute_reference_shape(
            images, group, label)

        print('- Normalizing object size')
        self._rescale_reference_shape()
        images = [
            i.rescale_to_reference_shape(self.reference_shape,
                                         group=group,
                                         label=label,
                                         interpolator=self.interpolator)
            for i in images
        ]

        print('- Generating multilevel scale space')
        if self.scaled_levels:
            # Gaussian pyramid
            generator = [
                i.gaussian_pyramid(n_levels=self.n_levels,
                                   downscale=self.downscale) for i in images
            ]
        else:
            # Smoothing pyramid
            generator = [
                i.smoothing_pyramid(n_levels=self.n_levels,
                                    downscale=self.downscale) for i in images
            ]

        print('- Generating multilevel feature space')
        images = []
        for _ in np.arange(self.n_levels):
            images.append([
                compute_features(next(g), self.feature_type)
                for g in generator
            ])
        images.reverse()

        print('- Extracting ground truth shapes')
        gt_shapes = [[i.landmarks[group][label].lms for i in img]
                     for img in images]

        print('- Building regressors')
        regressors = []
        # for each level
        for j, (level_images,
                level_gt_shapes) in enumerate(zip(images, gt_shapes)):
            print(' - Level {}'.format(j))

            trainer = self._set_regressor_trainer(j)

            if j == 0:
                regressor = trainer.train(level_images, level_gt_shapes,
                                          **kwargs)
            else:
                regressor = trainer.train(level_images, level_gt_shapes,
                                          level_shapes, **kwargs)

            print(' - Generating next level data')

            level_shapes = trainer.perturb_shapes(gt_shapes[0])

            regressors.append(regressor)
            count = 0
            total = len(regressors) * len(images[0]) * len(level_shapes[0])
            for k, r in enumerate(regressors):

                test_images = images[k]
                test_gt_shapes = gt_shapes[k]

                fittings = []
                for (i, gt_s, level_s) in zip(test_images, test_gt_shapes,
                                              level_shapes):
                    fitting_sublist = []
                    for ls in level_s:
                        fitting = r.fit(i, ls)
                        fitting.gt_shape = gt_s
                        fitting_sublist.append(fitting)
                        count += 1

                    fittings.append(fitting_sublist)
                    print(' - {} % '.format(round(100 * count / total)),
                          end='\r')

                if self.scaled_levels:
                    level_shapes = [[
                        Scale(self.downscale,
                              n_dims=self.reference_shape.n_dims).apply(
                                  f.final_shape) for f in fitting_sublist
                    ] for fitting_sublist in fittings]
                else:
                    level_shapes = [[f.final_shape for f in fitting_sublist]
                                    for fitting_sublist in fittings]

            mean_error = np.mean(
                np.array([
                    f.final_error for fitting_sublist in fittings
                    for f in fitting_sublist
                ]))
            print(' - Mean error = {}'.format(mean_error))

        return self._build_supervised_descent_fitter(regressors)
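
A hedged usage sketch for the training routine above. The concrete trainer class, its constructor arguments, and the fit call are assumptions based on the attributes the snippet references (n_levels, downscale, scaled_levels); only `final_shape` is taken directly from the code.

# Assumed names: SDTrainer (a concrete subclass defining train()),
# training_images, test_image and initial_shape.
trainer = SDTrainer(n_levels=3, downscale=2, scaled_levels=True)
fitter = trainer.train(training_images, group='PTS', label='all')
fitting = fitter.fit(test_image, initial_shape)
print(fitting.final_shape)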
Exemple #25
0
    def build(self, images, group=None, label='all'):
        r"""
        Builds a Multilevel Active Appearance Model from a list of
        landmarked images.

        Parameters
        ----------
        images: list of :class:`menpo.image.Image`
            The set of landmarked images from which to build the AAM.

        group : string, Optional
            The key of the landmark set that should be used. If None,
            and if there is only one set of landmarks, this set will be used.

            Default: None

        label: string, Optional
            The label of the landmark manager that you wish to use. If no
            label is passed, the convex hull of all landmarks is used.

            Default: 'all'

        Returns
        -------
        aam : :class:`menpo.fitmultiple.aam.builder.AAM`
            The AAM object
        """
        print('- Preprocessing')
        self.reference_shape, generator = self._preprocessing(
            images, group, label, self.diagonal_range, self.interpolator,
            self.scaled_levels, self.n_levels, self.downscale)

        print('- Building model pyramids')
        shape_models = []
        appearance_models = []
        # for each level
        for j in np.arange(self.n_levels):
            print(' - Level {}'.format(j))

            print('  - Computing feature space')
            images = [
                compute_features(next(g), self.feature_type)
                for g in generator
            ]
            # extract potentially rescaled shapes
            shapes = [i.landmarks[group][label].lms for i in images]

            if j == 0 or self.scaled_levels:
                print('  - Building shape model')
                if j != 0:
                    shapes = [
                        Scale(1 / self.downscale,
                              n_dims=shapes[0].n_dims).apply(s) for s in shapes
                    ]
                shape_model = self._build_shape_model(
                    shapes, self.max_shape_components)

                print('  - Building reference frame')
                reference_frame = self._build_reference_frame(shape_model.mean)

            # add shape model to the list
            shape_models.append(shape_model)

            print('  - Computing transforms')
            transforms = [
                self.transform(reference_frame.landmarks['source'].lms,
                               i.landmarks[group][label].lms) for i in images
            ]

            print('  - Warping images')
            images = [
                i.warp_to(reference_frame.mask,
                          t,
                          interpolator=self.interpolator)
                for i, t in zip(images, transforms)
            ]

            for i in images:
                i.landmarks['source'] = reference_frame.landmarks['source']
                self._mask_image(i)

            print('  - Building appearance model')
            appearance_model = PCAModel(images)
            # trim appearance model if required
            if self.max_appearance_components is not None:
                appearance_model.trim_components(
                    self.max_appearance_components)

            # add appearance model to the list
            appearance_models.append(appearance_model)

        # reverse the list of shape and appearance models so that they are
        # ordered from lower to higher resolution
        shape_models.reverse()
        appearance_models.reverse()

        return self._build_aam(shape_models, appearance_models)
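
Finally, a usage sketch for the AAM builder above. `AAMBuilder` and `training_images` are assumed names, and the constructor keywords simply mirror the attributes the snippet reads (n_levels, downscale, scaled_levels, max_shape_components, max_appearance_components); the exact signature varies by library version.

# Assumed names: AAMBuilder, training_images (list of landmarked images).
builder = AAMBuilder(n_levels=3, downscale=2, scaled_levels=True,
                     max_shape_components=25, max_appearance_components=250)
aam = builder.build(training_images, group='PTS', label='all')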