Example #1
    def __init__(self, images, group=None, bounding_box_group_glob=None,
                 verbose=False, reference_shape=None, diagonal=None,
                 scales=(0.5, 1.0), n_perturbations=30, n_dlib_perturbations=1,
                 perturb_from_gt_bounding_box=noisy_shape_from_bounding_box,
                 n_iterations=10, feature_padding=0, n_pixel_pairs=400,
                 distance_prior_weighting=0.1, regularisation_weight=0.1,
                 n_split_tests=20, n_trees=500, n_tree_levels=5):

        checks.check_diagonal(diagonal)

        self.diagonal = diagonal
        self.scales = checks.check_scales(scales)
        self.holistic_features = checks.check_features(no_op, self.n_scales)
        self.reference_shape = reference_shape
        self.n_perturbations = n_perturbations
        self.n_iterations = checks.check_max_iters(n_iterations, self.n_scales)
        self._perturb_from_gt_bounding_box = perturb_from_gt_bounding_box
        self._setup_dlib_options(feature_padding, n_pixel_pairs,
                                 distance_prior_weighting,
                                 regularisation_weight, n_split_tests, n_trees,
                                 n_dlib_perturbations, n_tree_levels)
        self._setup_algorithms()

        # Train DLIB over multiple scales
        self._train(images, group=group,
                    bounding_box_group_glob=bounding_box_group_glob,
                    verbose=verbose)
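
A minimal usage sketch for a fitter with this constructor, assuming it is menpofit's DlibERT class, that menpo.io is available, and that the training images carry a 'PTS' landmark group (recent menpo, where image.landmarks['PTS'] is a point cloud); the paths are placeholders:

import menpo.io as mio
from menpofit.dlib import DlibERT

# Import annotated training images (placeholder path, 'PTS' group assumed).
training_images = list(mio.import_images('/path/to/trainset/', verbose=True))

# Train an ERT fitter over two scales, mirroring the defaults above.
fitter = DlibERT(training_images, group='PTS', scales=(0.5, 1.0),
                 n_perturbations=30, verbose=True)

# Fit a new image starting from a bounding box (here the ground-truth box
# is used as a stand-in for a face detector's output).
image = mio.import_image('/path/to/test_image.jpg')
bb = image.landmarks['PTS'].bounding_box()
result = fitter.fit_from_bb(image, bb, gt_shape=image.landmarks['PTS'])
print(result.final_shape)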
Example #2
    def __init__(self, images, group=None, bounding_box_group_glob=None,
                 reference_shape=None, sd_algorithm_cls=Newton,
                 holistic_features=no_op, patch_features=no_op,
                 patch_shape=(17, 17), diagonal=None, scales=(0.5, 1.0),
                 n_iterations=6, n_perturbations=30,
                 perturb_from_gt_bounding_box=noisy_shape_from_bounding_box,
                 batch_size=None, verbose=False):
        # check parameters
        checks.check_diagonal(diagonal)
        scales = checks.check_scales(scales)
        n_scales = len(scales)
        patch_features = checks.check_features(patch_features, n_scales)
        holistic_features = checks.check_features(holistic_features, n_scales)
        patch_shape = checks.check_patch_shape(patch_shape, n_scales)
        # set parameters
        self.algorithms = []
        self.reference_shape = reference_shape
        self._sd_algorithm_cls = sd_algorithm_cls
        self.holistic_features = holistic_features
        self.patch_features = patch_features
        self.patch_shape = patch_shape
        self.diagonal = diagonal
        self.scales = scales
        self.n_perturbations = n_perturbations
        self.n_iterations = checks.check_max_iters(n_iterations, n_scales)
        self._perturb_from_gt_bounding_box = perturb_from_gt_bounding_box
        # set up algorithms
        self._setup_algorithms()

        # Now, train the model!
        self._train(images, increment=False, group=group,
                    bounding_box_group_glob=bounding_box_group_glob,
                    verbose=verbose, batch_size=batch_size)
Example #3
    def _fit(self, images, initial_shape, gt_shapes=None, max_iters=20,
             **kwargs):
        # Perform check
        max_iters = checks.check_max_iters(max_iters, self.n_scales)

        # Set initial and ground truth shapes
        shape = initial_shape
        gt_shape = None

        # Initialize list of algorithm results
        algorithm_results = []
        for i in range(self.n_scales):
            # Handle ground truth shape
            if gt_shapes is not None:
                gt_shape = gt_shapes[i]

            # Run algorithm
            algorithm_result = self.algorithms[i].run(images[i], shape,
                                                      gt_shape=gt_shape,
                                                      max_iters=max_iters[i],
                                                      **kwargs)
            # Add algorithm result to the list
            algorithm_results.append(algorithm_result)

            # Prepare this scale's final shape for the next scale
            shape = algorithm_result.final_shape
            if self.scales[i] != self.scales[-1]:
                shape = Scale(self.scales[i + 1] / self.scales[i],
                              n_dims=shape.n_dims).apply(shape)

        # Return list of algorithm results
        return algorithm_results
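
The only cross-scale bookkeeping in this loop is the final Scale transform. A standalone sketch of that step, assuming menpo's PointCloud and Scale; the coordinates are made up:

import numpy as np
from menpo.shape import PointCloud
from menpo.transform import Scale

scales = (0.5, 1.0)

# A shape fitted on the 0.5-scale image (made-up coordinates).
shape_at_half_scale = PointCloud(np.array([[10.0, 12.0],
                                           [20.0, 24.0],
                                           [30.0, 36.0]]))

# Same expression as in the loop: Scale(scales[i + 1] / scales[i], n_dims=...).
rescale = Scale(scales[1] / scales[0], n_dims=shape_at_half_scale.n_dims)
shape_at_full_scale = rescale.apply(shape_at_half_scale)

print(shape_at_full_scale.points)   # every coordinate doubled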
Example #4
    def _fit(self, images, initial_shape, gt_shapes=None, max_iters=20,
             **kwargs):
        r"""
        Fits the fitter to the multilevel pyramidal images.

        Parameters
        ----------
        images: :class:`menpo.image.masked.MaskedImage` list
            The images to be fitted.
        initial_shape: :class:`menpo.shape.PointCloud`
            The initial shape from which the fitting will start.
        gt_shapes: :class:`menpo.shape.PointCloud` list, optional
            The original ground truth shapes associated with the multilevel
            images.
        max_iters: int or list, optional
            The maximum number of iterations.
            If int, then this will be the overall maximum number of iterations
            for all the pyramidal levels.
            If list, then a maximum number of iterations is specified for each
            pyramidal level.

        Returns
        -------
        algorithm_results: :class:`FittingResult` list
            The list of fitting objects, one per pyramidal level, containing
            the state of the fitting procedure at that level.
        """
        # Perform check
        max_iters = checks.check_max_iters(max_iters, self.n_scales)

        # Set initial and ground truth shapes
        shape = initial_shape
        gt_shape = None

        # Initialize list of algorithm results
        algorithm_results = []
        for i in range(self.n_scales):
            # Handle ground truth shape
            if gt_shapes is not None:
                gt_shape = gt_shapes[i]

            # Run algorithm
            algorithm_result = self.algorithms[i].run(images[i], shape,
                                                      gt_shape=gt_shape,
                                                      max_iters=max_iters[i],
                                                      **kwargs)
            # Add algorithm result to the list
            algorithm_results.append(algorithm_result)

            # Prepare this scale's final shape for the next scale
            shape = algorithm_result.final_shape
            if self.scales[i] != self.scales[-1]:
                shape = Scale(self.scales[i + 1] / self.scales[i],
                              n_dims=shape.n_dims).apply(shape)

        # Return list of algorithm results
        return algorithm_results
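
The docstring's int-or-list contract for max_iters is the only non-obvious input handling here. Below is a hedged sketch of that behaviour using a hypothetical helper (not menpofit's actual checks.check_max_iters, whose exact splitting policy may differ):

def expand_max_iters(max_iters, n_scales):
    """Hypothetical stand-in illustrating the int-or-list contract."""
    if isinstance(max_iters, int):
        # An int is an overall budget: split it across the pyramidal levels.
        per_level = max_iters // n_scales
        remainder = max_iters % n_scales
        return [per_level + (1 if i < remainder else 0) for i in range(n_scales)]
    if len(max_iters) != n_scales:
        raise ValueError('Expected one maximum per pyramidal level, '
                         'got {} for {} levels'.format(len(max_iters), n_scales))
    return list(max_iters)

print(expand_max_iters(20, 2))       # [10, 10]
print(expand_max_iters([5, 15], 2))  # [5, 15]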
Example #5
    def __init__(self,
                 images,
                 group=None,
                 bounding_box_group_glob=None,
                 reference_shape=None,
                 diagonal=None,
                 scales=(0.5, 1.0),
                 n_perturbations=30,
                 n_dlib_perturbations=1,
                 perturb_from_gt_bounding_box=noisy_shape_from_bounding_box,
                 n_iterations=10,
                 feature_padding=0,
                 n_pixel_pairs=400,
                 distance_prior_weighting=0.1,
                 regularisation_weight=0.1,
                 n_split_tests=20,
                 n_trees=500,
                 n_tree_levels=5,
                 verbose=False):
        checks.check_diagonal(diagonal)
        scales = checks.check_scales(scales)
        n_scales = len(scales)
        # Dummy option that is required by _prepare_image of MultiFitter.
        holistic_features = checks.check_callable(no_op, n_scales)

        # Call superclass
        super(DlibERT, self).__init__(scales=scales,
                                      reference_shape=reference_shape,
                                      holistic_features=holistic_features,
                                      algorithms=[])

        # Set parameters
        self.diagonal = diagonal
        self.n_perturbations = n_perturbations
        self.n_iterations = checks.check_max_iters(n_iterations, n_scales)
        self._perturb_from_gt_bounding_box = perturb_from_gt_bounding_box

        # DLib options
        self._setup_dlib_options(feature_padding, n_pixel_pairs,
                                 distance_prior_weighting,
                                 regularisation_weight, n_split_tests, n_trees,
                                 n_dlib_perturbations, n_tree_levels)

        # Set-up algorithms
        for j in range(self.n_scales):
            self.algorithms.append(
                DlibAlgorithm(self._dlib_options_templates[j],
                              n_iterations=self.n_iterations[j]))

        # Train DLIB over multiple scales
        self._train(images,
                    group=group,
                    bounding_box_group_glob=bounding_box_group_glob,
                    verbose=verbose)
Example #6
    def __init__(
        self,
        images,
        group=None,
        bounding_box_group_glob=None,
        reference_shape=None,
        sd_algorithm_cls=None,
        holistic_features=no_op,
        patch_features=no_op,
        patch_shape=(17, 17),
        diagonal=None,
        scales=(0.5, 1.0),
        n_iterations=3,
        n_perturbations=30,
        perturb_from_gt_bounding_box=noisy_shape_from_bounding_box,
        batch_size=None,
        verbose=False,
    ):

        if batch_size is not None:
            raise NotImplementedError("Training an SDM with a batch size " "(incrementally) is not implemented yet.")
        # check parameters
        checks.check_diagonal(diagonal)
        scales = checks.check_scales(scales)
        n_scales = len(scales)
        patch_features = checks.check_callable(patch_features, n_scales)
        sd_algorithm_cls = checks.check_callable(sd_algorithm_cls, n_scales)
        holistic_features = checks.check_callable(holistic_features, n_scales)
        patch_shape = checks.check_patch_shape(patch_shape, n_scales)
        # set parameters
        self.algorithms = []
        self.reference_shape = reference_shape
        self._sd_algorithm_cls = sd_algorithm_cls
        self.holistic_features = holistic_features
        self.patch_features = patch_features
        self.patch_shape = patch_shape
        self.diagonal = diagonal
        self.scales = scales
        self.n_perturbations = n_perturbations
        self.n_iterations = checks.check_max_iters(n_iterations, n_scales)
        self._perturb_from_gt_bounding_box = perturb_from_gt_bounding_box
        # set up algorithms
        self._setup_algorithms()

        # Now, train the model!
        self._train(
            images,
            increment=False,
            group=group,
            bounding_box_group_glob=bounding_box_group_glob,
            verbose=verbose,
            batch_size=batch_size,
        )
Example #7
    def __init__(self,
                 images,
                 group=None,
                 bounding_box_group_glob=None,
                 reference_shape=None,
                 sd_algorithm_cls=None,
                 holistic_features=no_op,
                 patch_features=no_op,
                 patch_shape=(17, 17),
                 diagonal=None,
                 scales=(0.5, 1.0),
                 n_iterations=3,
                 n_perturbations=30,
                 perturb_from_gt_bounding_box=noisy_shape_from_bounding_box,
                 batch_size=None,
                 verbose=False):

        if batch_size is not None:
            raise NotImplementedError(
                'Training an SDM with a batch size '
                '(incrementally) is not implemented yet.')
        # check parameters
        checks.check_diagonal(diagonal)
        scales = checks.check_scales(scales)
        n_scales = len(scales)
        patch_features = checks.check_callable(patch_features, n_scales)
        sd_algorithm_cls = checks.check_callable(sd_algorithm_cls, n_scales)
        holistic_features = checks.check_callable(holistic_features, n_scales)
        patch_shape = checks.check_patch_shape(patch_shape, n_scales)
        # set parameters
        self.algorithms = []
        self.reference_shape = reference_shape
        self._sd_algorithm_cls = sd_algorithm_cls
        self.holistic_features = holistic_features
        self.patch_features = patch_features
        self.patch_shape = patch_shape
        self.diagonal = diagonal
        self.scales = scales
        self.n_perturbations = n_perturbations
        self.n_iterations = checks.check_max_iters(n_iterations, n_scales)
        self._perturb_from_gt_bounding_box = perturb_from_gt_bounding_box
        # set up algorithms
        self._setup_algorithms()

        # Now, train the model!
        self._train(images,
                    increment=False,
                    group=group,
                    bounding_box_group_glob=bounding_box_group_glob,
                    verbose=verbose,
                    batch_size=batch_size)
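
A minimal usage sketch for an SDM built this way, assuming the constructor belongs to a class like menpofit's SupervisedDescentFitter, that menpo.io is available, and that the images carry a 'PTS' group (recent menpo, where image.landmarks['PTS'] is a point cloud); the paths are placeholders:

import menpo.io as mio
from menpofit.sdm import SupervisedDescentFitter

# Import annotated training images (placeholder path, 'PTS' group assumed).
training_images = list(mio.import_images('/path/to/trainset/', verbose=True))

# Train a two-scale SDM, mirroring the defaults of the constructor above.
fitter = SupervisedDescentFitter(training_images, group='PTS',
                                 scales=(0.5, 1.0), n_iterations=3,
                                 n_perturbations=30, verbose=True)

# Fit a test image from a bounding box; passing the ground-truth shape lets
# the returned result report fitting errors.
image = mio.import_image('/path/to/test_image.jpg')
gt_shape = image.landmarks['PTS']
result = fitter.fit_from_bb(image, gt_shape.bounding_box(), gt_shape=gt_shape)
print(result.final_shape)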
Example #8
    def __init__(self,
                 images,
                 group=None,
                 bounding_box_group_glob=None,
                 verbose=False,
                 reference_shape=None,
                 diagonal=None,
                 scales=(0.5, 1.0),
                 n_perturbations=30,
                 n_dlib_perturbations=1,
                 perturb_from_gt_bounding_box=noisy_shape_from_bounding_box,
                 n_iterations=10,
                 feature_padding=0,
                 n_pixel_pairs=400,
                 distance_prior_weighting=0.1,
                 regularisation_weight=0.1,
                 n_split_tests=20,
                 n_trees=500,
                 n_tree_levels=5):

        checks.check_diagonal(diagonal)

        self.diagonal = diagonal
        self.scales = checks.check_scales(scales)
        self.holistic_features = checks.check_callable(no_op, self.n_scales)
        self.reference_shape = reference_shape
        self.n_perturbations = n_perturbations
        self.n_iterations = checks.check_max_iters(n_iterations, self.n_scales)
        self._perturb_from_gt_bounding_box = perturb_from_gt_bounding_box
        self._setup_dlib_options(feature_padding, n_pixel_pairs,
                                 distance_prior_weighting,
                                 regularisation_weight, n_split_tests, n_trees,
                                 n_dlib_perturbations, n_tree_levels)
        self._setup_algorithms()

        # Train DLIB over multiple scales
        self._train(images,
                    group=group,
                    bounding_box_group_glob=bounding_box_group_glob,
                    verbose=verbose)
Example #9
    def __init__(self, images, group=None, bounding_box_group_glob=None,
                 reference_shape=None, diagonal=None, scales=(0.5, 1.0),
                 n_perturbations=30, n_dlib_perturbations=1,
                 perturb_from_gt_bounding_box=noisy_shape_from_bounding_box,
                 n_iterations=10, feature_padding=0, n_pixel_pairs=400,
                 distance_prior_weighting=0.1, regularisation_weight=0.1,
                 n_split_tests=20, n_trees=500, n_tree_levels=5, verbose=False):
        checks.check_diagonal(diagonal)
        scales = checks.check_scales(scales)
        n_scales = len(scales)
        # Dummy option that is required by _prepare_image of MultiFitter.
        holistic_features = checks.check_callable(no_op, n_scales)

        # Call superclass
        super(DlibERT, self).__init__(
            scales=scales, reference_shape=reference_shape,
            holistic_features=holistic_features, algorithms=[])

        # Set parameters
        self.diagonal = diagonal
        self.n_perturbations = n_perturbations
        self.n_iterations = checks.check_max_iters(n_iterations, n_scales)
        self._perturb_from_gt_bounding_box = perturb_from_gt_bounding_box

        # DLib options
        self._setup_dlib_options(feature_padding, n_pixel_pairs,
                                 distance_prior_weighting,
                                 regularisation_weight, n_split_tests, n_trees,
                                 n_dlib_perturbations, n_tree_levels)

        # Set-up algorithms
        for j in range(self.n_scales):
            self.algorithms.append(DlibAlgorithm(
                self._dlib_options_templates[j],
                n_iterations=self.n_iterations[j]))

        # Train DLIB over multiple scales
        self._train(images, group=group,
                    bounding_box_group_glob=bounding_box_group_glob,
                    verbose=verbose)
Example #10
    def _fit(self,
             images,
             initial_shape,
             affine_transforms,
             scale_transforms,
             gt_shapes=None,
             max_iters=20,
             return_costs=False,
             **kwargs):
        r"""
        Function that applies the multi-scale fitting procedure to an image,
        given the initial shape.

        Parameters
        ----------
        images : `list` of `menpo.image.Image`
            The list of images per scale.
        initial_shape : `menpo.shape.PointCloud`
            The initial shape estimate from which the fitting procedure
            will start.
        affine_transforms : `list` of `menpo.transform.Affine`
            The list of affine transforms per scale that are the inverses of
            the transformations introduced by rescaling with respect to the
            reference shape, as well as by the feature extraction.
        scale_transforms : `list` of `menpo.shape.Scale`
            The list of inverse scaling transforms per scale.
        gt_shapes : `list` of `menpo.shape.PointCloud`
            The list of ground truth shapes per scale.
        max_iters : `int` or `list` of `int`, optional
            The maximum number of iterations. If `int`, then it specifies the
            maximum number of iterations over all scales. If `list` of `int`,
            then it specifies the maximum number of iterations per scale.
        return_costs : `bool`, optional
            If ``True``, then the cost function values will be computed
            during the fitting procedure. Then these cost values will be
            assigned to the returned `fitting_result`. *Note that the costs
            computation increases the computational cost of the fitting. The
            additional computation cost depends on the fitting method. Only
            use this option for research purposes.*
        kwargs : `dict`, optional
            Additional keyword arguments that can be passed to specific
            implementations.

        Returns
        -------
        algorithm_results : `list` of :map:`NonParametricIterativeResult` or subclass
            The list of fitting results, one per scale.
        """
        # Check max iters
        max_iters = checks.check_max_iters(max_iters, self.n_scales)

        # Set initial and ground truth shapes
        shape = initial_shape
        gt_shape = None

        # Initialize list of algorithm results
        algorithm_results = []
        for i in range(self.n_scales):
            # Handle ground truth shape
            if gt_shapes is not None:
                gt_shape = gt_shapes[i]

            # Run algorithm
            algorithm_result = self.algorithms[i].run(
                images[i],
                shape,
                gt_shape=gt_shape,
                max_iters=max_iters[i],
                return_costs=return_costs,
                **kwargs)
            # Add algorithm result to the list
            algorithm_results.append(algorithm_result)

            # Prepare this scale's final shape for the next scale
            if i < self.n_scales - 1:
                # This should not be done for the last scale.
                shape = algorithm_result.final_shape
                if self.holistic_features[i + 1] != self.holistic_features[i]:
                    # If the features function of the current scale is different
                    # than the one of the next scale, this means that the affine
                    # transform is different as well. Thus we need to do the
                    # following composition:
                    #
                    #    S_{i+1} \circ A_{i+1} \circ inv(A_i) \circ inv(S_i)
                    #
                    # where:
                    #    S_i : scaling transform of current scale
                    #    S_{i+1} : scaling transform of next scale
                    #    A_i : affine transform of current scale
                    #    A_{i+1} : affine transform of next scale
                    t1 = scale_transforms[i].compose_after(
                        affine_transforms[i])
                    t2 = affine_transforms[i +
                                           1].pseudoinverse().compose_after(t1)
                    transform = scale_transforms[
                        i + 1].pseudoinverse().compose_after(t2)
                    shape = transform.apply(shape)
                elif (self.holistic_features[i + 1]
                      == self.holistic_features[i]
                      and self.scales[i] != self.scales[i + 1]):
                    # If the features function of the current scale is the same
                    # as the one of the next scale, this means that the affine
                    # transform is the same as well, and thus can be omitted.
                    # Given that the scale factors are different, we need to do
                    # the following composition:
                    #
                    #    S_{i+1} \circ inv(S_i)
                    #
                    # where:
                    #    S_i : scaling transform of current scale
                    #    S_{i+1} : scaling transform of next scale
                    transform = scale_transforms[
                        i + 1].pseudoinverse().compose_after(
                            scale_transforms[i])
                    shape = transform.apply(shape)

        # Return list of algorithm results
        return algorithm_results
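
A standalone sketch of the transform plumbing used in the branches above (compose_after, pseudoinverse, apply), with made-up menpo Translation and UniformScale transforms standing in for the stored per-scale inverse affine and scaling transforms; it illustrates only the composition mechanics, not the actual transforms produced during fitting:

import numpy as np
from menpo.shape import PointCloud
from menpo.transform import Translation, UniformScale

# Made-up stand-ins for the stored *inverse* transforms of two consecutive
# scales (in practice they come from rescaling and feature extraction).
inv_affine_lo = Translation([-2.0, -3.0])     # plays the role of inv(A_i)
inv_affine_hi = Translation([-1.0, -1.0])     # plays the role of inv(A_{i+1})
inv_scale_lo = UniformScale(2.0, n_dims=2)    # plays the role of inv(S_i)
inv_scale_hi = UniformScale(1.0, n_dims=2)    # plays the role of inv(S_{i+1})

# Same chain as in the first branch above.
t1 = inv_scale_lo.compose_after(inv_affine_lo)
t2 = inv_affine_hi.pseudoinverse().compose_after(t1)
to_next_scale = inv_scale_hi.pseudoinverse().compose_after(t2)

# Map a shape from the current scale's frame into the next scale's frame.
shape = PointCloud(np.array([[10.0, 12.0], [20.0, 24.0]]))
print(to_next_scale.apply(shape).points)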
Example #11
    def _fit(self, images, initial_shape, affine_transforms, scale_transforms,
             gt_shapes=None, max_iters=20, return_costs=False, **kwargs):
        r"""
        Function that applies the multi-scale fitting procedure to an image,
        given the initial shape.

        Parameters
        ----------
        images : `list` of `menpo.image.Image`
            The list of images per scale.
        initial_shape : `menpo.shape.PointCloud`
            The initial shape estimate from which the fitting procedure
            will start.
        affine_transforms : `list` of `menpo.transform.Affine`
            The list of affine transforms per scale that are the inverses of
            the transformations introduced by rescaling with respect to the
            reference shape, as well as by the feature extraction.
        scale_transforms : `list` of `menpo.shape.Scale`
            The list of inverse scaling transforms per scale.
        gt_shapes : `list` of `menpo.shape.PointCloud`
            The list of ground truth shapes per scale.
        max_iters : `int` or `list` of `int`, optional
            The maximum number of iterations. If `int`, then it specifies the
            maximum number of iterations over all scales. If `list` of `int`,
            then it specifies the maximum number of iterations per scale.
        return_costs : `bool`, optional
            If ``True``, then the cost function values will be computed
            during the fitting procedure. Then these cost values will be
            assigned to the returned `fitting_result`. *Note that the costs
            computation increases the computational cost of the fitting. The
            additional computation cost depends on the fitting method. Only
            use this option for research purposes.*
        kwargs : `dict`, optional
            Additional keyword arguments that can be passed to specific
            implementations.

        Returns
        -------
        algorithm_results : `list` of :map:`NonParametricIterativeResult` or subclass
            The list of fitting results, one per scale.
        """
        # Check max iters
        max_iters = checks.check_max_iters(max_iters, self.n_scales)

        # Set initial and ground truth shapes
        shape = initial_shape
        gt_shape = None

        # Initialize list of algorithm results
        algorithm_results = []
        for i in range(self.n_scales):
            # Handle ground truth shape
            if gt_shapes is not None:
                gt_shape = gt_shapes[i]

            # Run algorithm
            algorithm_result = self.algorithms[i].run(images[i], shape,
                                                      gt_shape=gt_shape,
                                                      max_iters=max_iters[i],
                                                      return_costs=return_costs,
                                                      **kwargs)
            # Add algorithm result to the list
            algorithm_results.append(algorithm_result)

            # Prepare this scale's final shape for the next scale
            if i < self.n_scales - 1:
                # This should not be done for the last scale.
                shape = algorithm_result.final_shape
                if self.holistic_features[i + 1] != self.holistic_features[i]:
                    # If the features function of the current scale is different
                    # than the one of the next scale, this means that the affine
                    # transform is different as well. Thus we need to do the
                    # following composition:
                    #
                    #    S_{i+1} \circ A_{i+1} \circ inv(A_i) \circ inv(S_i)
                    #
                    # where:
                    #    S_i : scaling transform of current scale
                    #    S_{i+1} : scaling transform of next scale
                    #    A_i : affine transform of current scale
                    #    A_{i+1} : affine transform of next scale
                    t1 = scale_transforms[i].compose_after(affine_transforms[i])
                    t2 = affine_transforms[i + 1].pseudoinverse().compose_after(t1)
                    transform = scale_transforms[i + 1].pseudoinverse().compose_after(t2)
                    shape = transform.apply(shape)
                elif (self.holistic_features[i + 1] == self.holistic_features[i] and
                      self.scales[i] != self.scales[i + 1]):
                    # If the features function of the current scale is the same
                    # as the one of the next scale, this means that the affine
                    # transform is the same as well, and thus can be omitted.
                    # Given that the scale factors are different, we need to do
                    # the following composition:
                    #
                    #    S_{i+1} \circ inv(S_i)
                    #
                    # where:
                    #    S_i : scaling transform of current scale
                    #    S_{i+1} : scaling transform of next scale
                    transform = scale_transforms[i + 1].pseudoinverse().compose_after(scale_transforms[i])
                    shape = transform.apply(shape)

        # Return list of algorithm results
        return algorithm_results
Example #12
    def _fit(self,
             images,
             initial_shape,
             gt_shapes=None,
             max_iters=20,
             **kwargs):
        r"""
        Fits the fitter to the multilevel pyramidal images.

        Parameters
        ----------
        images: :class:`menpo.image.masked.MaskedImage` list
            The images to be fitted.
        initial_shape: :class:`menpo.shape.PointCloud`
            The initial shape from which the fitting will start.
        gt_shapes: :class:`menpo.shape.PointCloud` list, optional
            The original ground truth shapes associated with the multilevel
            images.
        max_iters: int or list, optional
            The maximum number of iterations.
            If int, then this will be the overall maximum number of iterations
            for all the pyramidal levels.
            If list, then a maximum number of iterations is specified for each
            pyramidal level.

        Returns
        -------
        algorithm_results: :class:`FittingResult` list
            The list of fitting objects, one per pyramidal level, containing
            the state of the fitting procedure at that level.
        """
        # Perform check
        max_iters = checks.check_max_iters(max_iters, self.n_scales)

        # Set initial and ground truth shapes
        shape = initial_shape
        gt_shape = None

        # Initialize list of algorithm results
        algorithm_results = []
        for i in range(self.n_scales):
            # Handle ground truth shape
            if gt_shapes is not None:
                gt_shape = gt_shapes[i]

            # Run algorithm
            algorithm_result = self.algorithms[i].run(images[i],
                                                      shape,
                                                      gt_shape=gt_shape,
                                                      max_iters=max_iters[i],
                                                      **kwargs)
            # Add algorithm result to the list
            algorithm_results.append(algorithm_result)

            # Prepare this scale's final shape for the next scale
            shape = algorithm_result.final_shape
            if self.scales[i] != self.scales[-1]:
                shape = Scale(self.scales[i + 1] / self.scales[i],
                              n_dims=shape.n_dims).apply(shape)

        # Return list of algorithm results
        return algorithm_results