Example #1
    def shapes(self, as_points=False):
        r"""
        Generates a list containing the shapes obtained at each fitting
        iteration.

        Parameters
        ----------
        as_points : `boolean`, optional
            If ``True``, the fitted shapes are returned as a `list` of
            `ndarray`; otherwise, as a `list` of :map:`PointCloud`.

        Returns
        -------
        shapes : `list` of :map:`PointCloud` or `list` of `ndarray`
            A list containing the fitted shapes at each iteration of
            the fitting procedure.
        """
        shapes = []
        for j, (alg, s) in enumerate(zip(self.algorithm_results, self.scales)):
            transform = Scale(self.scales[-1]/s, alg.final_shape.n_dims)
            for t in alg.shapes(as_points=as_points):
                t = transform.apply(t)
                shapes.append(self._affine_correction.apply(t))

        return shapes
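
A minimal sketch of the per-scale correction used above, assuming menpo's
public `Scale` factory and `PointCloud`; the `scales` values and the shape
are made up for illustration:

import numpy as np
from menpo.shape import PointCloud
from menpo.transform import Scale

scales = [0.5, 1.0]  # low --> high, as in self.scales
shape_at_low_scale = PointCloud(np.array([[1.0, 2.0], [3.0, 4.0]]))

# A shape fitted at scale 0.5 must be enlarged by 1.0 / 0.5 = 2 to live
# in the final (reference) frame before any affine correction is applied.
transform = Scale(scales[-1] / scales[0], n_dims=2)
print(transform.apply(shape_at_low_scale).points)
# [[2. 4.]
#  [6. 8.]]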
Example #2
    def tcoords_pixel_scaled(self):
        r"""
        Returns a :map:`PointCloud` that is modified to be suitable for directly
        indexing into the pixels of the texture (e.g. for manual mapping
        operations). The resulting tcoords behave just like image landmarks
        do.

        The operations that are performed are:

          - Flipping the origin from bottom-left to top-left
          - Scaling the tcoords by the image shape (denormalising them)
          - Permuting the axes so that the coordinates are ordered (y, x),
            matching how image pixels are indexed

        Returns
        -------
        tcoords_scaled : :map:`PointCloud`
            A copy of the tcoords that behave like :map:`Image` landmarks

        Examples
        --------
        Recovering pixel values for every texture coordinate:

        >>> texture = texturedtrimesh.texture
        >>> tc_ps = texturedtrimesh.tcoords_pixel_scaled()
        >>> pixel_values_at_tcs = texture[tc_ps[:, 0], tc_ps[:, 1]]
        """
        scale = Scale(np.array(self.texture.shape)[::-1])
        tcoords = self.tcoords.points.copy()
        # flip the 'y' so that 1 -> 0 and 0 -> 1, moving the origin to top-left
        tcoords[:, 1] = 1 - tcoords[:, 1]
        # apply the scale to get the units correct
        tcoords = scale.apply(tcoords)
        # flip axis 0 and axis 1 so indexing is as expected
        tcoords = tcoords[:, ::-1]
        return PointCloud(tcoords)
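
A pure-numpy sketch of the three steps above, using a single normalised
tcoord and a hypothetical 100 x 200 (rows x cols) texture:

import numpy as np

tcoords = np.array([[0.25, 0.1]])  # (x, y) in [0, 1], origin bottom-left
shape = (100, 200)                 # texture shape: (rows, cols)

tcoords[:, 1] = 1 - tcoords[:, 1]  # flip origin: bottom-left -> top-left
tcoords *= np.array(shape)[::-1]   # denormalise: x by width, y by height
tcoords = tcoords[:, ::-1]         # permute (x, y) -> (y, x) for indexing
print(tcoords)                     # [[90. 50.]]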
Example #3
    def shapes(self, as_points=False):
        r"""
        Generates a list containing the shapes obtained at each fitting
        iteration.

        Parameters
        ----------
        as_points : `boolean`, optional
            If ``True``, the fitted shapes are returned as a `list` of
            `ndarray`; otherwise, as a `list` of :map:`PointCloud`.

        Returns
        -------
        shapes : `list` of :map:`PointCloud` or `list` of `ndarray`
            A list containing the fitted shapes at each iteration of
            the fitting procedure.
        """
        shapes = []
        for j, (alg, s) in enumerate(zip(self.algorithm_results, self.scales)):
            transform = Scale(self.scales[-1] / s, alg.final_shape.n_dims)
            for t in alg.shapes(as_points=as_points):
                t = transform.apply(t)
                shapes.append(self._affine_correction.apply(t))

        return shapes
Example #4
File: textured.py  Project: mozata/menpo
    def tcoords_pixel_scaled(self):
        r"""
        Returns a :map:`PointCloud` that is modified to be suitable for directly
        indexing into the pixels of the texture (e.g. for manual mapping
        operations). The resulting tcoords behave just like image landmarks
        do.

        The operations that are performed are:

          - Flipping the origin from bottom-left to top-left
          - Scaling the tcoords by the image shape (denormalising them)
          - Permuting the axes so that the coordinates are ordered (y, x),
            matching how image pixels are indexed

        Returns
        -------
        tcoords_scaled : :map:`PointCloud`
            A copy of the tcoords that behave like :map:`Image` landmarks

        Examples
        --------
        Recovering pixel values for every texture coordinate:

        >>> texture = texturedtrimesh.texture
        >>> tc_ps = texturedtrimesh.tcoords_pixel_scaled()
        >>> pixel_values_at_tcs = texture[tc_ps[:, 0], tc_ps[:, 1]]
        """
        scale = Scale(np.array(self.texture.shape)[::-1])
        tcoords = self.tcoords.points.copy()
        # flip the 'y' so that 1 -> 0 and 0 -> 1, moving the origin to top-left
        tcoords[:, 1] = 1 - tcoords[:, 1]
        # apply the scale to get the units correct
        tcoords = scale.apply(tcoords)
        # flip axis 0 and axis 1 so indexing is as expected
        tcoords = tcoords[:, ::-1]
        return PointCloud(tcoords)
Example #5
def _rescale_shapes_to_reference(algorithm_results, scales, affine_correction):
    r"""
    """
    shapes = []
    for j, (alg, scale) in enumerate(zip(algorithm_results, scales)):
        transform = Scale(scales[-1] / scale, alg.final_shape.n_dims)
        for shape in alg.shapes:
            shape = transform.apply(shape)
            shapes.append(affine_correction.apply(shape))
    return shapes
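
The two-step application above (scale, then affine correction) is equivalent
to applying a single composed transform. A hedged sketch, with a simple
`Translation` standing in for the real affine correction:

import numpy as np
from menpo.shape import PointCloud
from menpo.transform import Scale, Translation

shape = PointCloud(np.array([[1., 1.], [2., 3.]]))
scale = Scale(2.0, n_dims=2)
affine_correction = Translation([5., 0.])  # stand-in for the real correction

a = affine_correction.apply(scale.apply(shape))
b = scale.compose_before(affine_correction).apply(shape)
print(np.allclose(a.points, b.points))  # True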
Example #6
def _rescale_shapes_to_reference(fitting_results, n_levels, downscale,
                                 affine_correction):
    n = n_levels - 1
    shapes = []
    for j, f in enumerate(fitting_results):
        transform = Scale(downscale ** (n - j), f.final_shape.n_dims)
        for t in f.shapes:
            t = transform.apply(t)
            shapes.append(affine_correction.apply(t))
    return shapes
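
Worked factors for this older pyramid convention: with n_levels=3 and
downscale=2, the shapes from levels j = 0, 1, 2 are scaled by 2**(2-0)=4,
2**(2-1)=2 and 2**0=1 respectively, so the coarsest level is enlarged the
most:

n_levels, downscale = 3, 2
print([downscale ** (n_levels - 1 - j) for j in range(n_levels)])  # [4, 2, 1]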
Example #7
def _rescale_shapes_to_reference(algorithm_results, scales, affine_correction):
    r"""
    """
    shapes = []
    for j, (alg, scale) in enumerate(zip(algorithm_results, scales)):
        transform = Scale(scales[-1]/scale, alg.final_shape.n_dims)
        for shape in alg.shapes:
            shape = transform.apply(shape)
            shapes.append(affine_correction.apply(shape))
    return shapes
Example #8
def test_chain_compose_after_inplace_tps():
    a = PointCloud(np.random.random([10, 2]))
    b = PointCloud(np.random.random([10, 2]))
    tps = ThinPlateSplines(a, b)

    t = Translation([3, 4])
    s = Scale([4, 2])
    chain = TransformChain([t, s])
    chain.compose_after_inplace(tps)

    points = PointCloud(np.random.random([10, 2]))

    manual_res = s.apply(t.apply(tps.apply(points)))
    chain_res = chain.apply(points)
    assert (np.all(manual_res.points == chain_res.points))
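
For reference, a minimal sketch of menpo's compose semantics as exercised by
this test and the next: `a.compose_before(b)` applies `a` first and then `b`,
while `a.compose_after(b)` applies `b` first:

import numpy as np
from menpo.shape import PointCloud
from menpo.transform import Scale, Translation

t = Translation([3, 4])
s = Scale([4, 2])
p = PointCloud(np.zeros([1, 2]))

print(t.compose_before(s).apply(p).points)  # s(t(p)) = [[12.  8.]]
print(t.compose_after(s).apply(p).points)   # t(s(p)) = [[3. 4.]]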
Example #9
def chain_compose_before_tps_test():
    a = PointCloud(np.random.random([10, 2]))
    b = PointCloud(np.random.random([10, 2]))
    tps = ThinPlateSplines(a, b)

    t = Translation([3, 4])
    s = Scale([4, 2])
    chain = TransformChain([t, s])
    chain_mod = chain.compose_before(tps)

    points = PointCloud(np.random.random([10, 2]))

    manual_res = tps.apply(s.apply(t.apply(points)))
    chain_res = chain_mod.apply(points)
    assert(np.all(manual_res.points == chain_res.points))
Example #10
    def _train_batch(self,
                     image_batch,
                     increment=False,
                     group=None,
                     bounding_box_group_glob=None,
                     verbose=False):
        # Rescale to existing reference shape
        image_batch = rescale_images_to_reference_shape(image_batch,
                                                        group,
                                                        self.reference_shape,
                                                        verbose=verbose)

        generated_bb_func = generate_perturbations_from_gt(
            image_batch,
            self.n_perturbations,
            self._perturb_from_gt_bounding_box,
            gt_group=group,
            bb_group_glob=bounding_box_group_glob,
            verbose=verbose)

        # for each scale (low --> high)
        current_shapes = []
        for j in range(self.n_scales):
            if verbose:
                if len(self.scales) > 1:
                    scale_prefix = '  - Scale {}: '.format(j)
                else:
                    scale_prefix = '  - '
            else:
                scale_prefix = None

            # Handle holistic features
            if j == 0 and self.holistic_features[j] == no_op:
                # Saves a lot of memory
                feature_images = image_batch
            elif (j == 0 or self.holistic_features[j]
                  is not self.holistic_features[j - 1]):
                # Compute features only if this is the first pass through
                # the loop or the features at this scale are different from
                # the features at the previous scale
                feature_images = compute_features(image_batch,
                                                  self.holistic_features[j],
                                                  prefix=scale_prefix,
                                                  verbose=verbose)
            # handle scales
            if self.scales[j] != 1:
                # Scale feature images only if scale is different than 1
                scaled_images = scale_images(feature_images,
                                             self.scales[j],
                                             prefix=scale_prefix,
                                             verbose=verbose)
            else:
                scaled_images = feature_images

            # Extract scaled ground truth shapes for current scale
            scaled_shapes = [i.landmarks[group].lms for i in scaled_images]

            if j == 0:
                msg = '{}Aligning reference shape with bounding boxes.'.format(
                    scale_prefix)
                wrap = partial(print_progress,
                               prefix=msg,
                               end_with_newline=False,
                               verbose=verbose)

                # Extract perturbations at the very bottom level
                for ii in wrap(scaled_images):
                    c_shapes = []
                    for bbox in generated_bb_func(ii):
                        c_s = align_shape_with_bounding_box(
                            self.reference_shape, bbox)
                        c_shapes.append(c_s)
                    current_shapes.append(c_shapes)

            # train supervised descent algorithm
            if not increment:
                current_shapes = self.algorithms[j].train(scaled_images,
                                                          scaled_shapes,
                                                          current_shapes,
                                                          prefix=scale_prefix,
                                                          verbose=verbose)
            else:
                current_shapes = self.algorithms[j].increment(
                    scaled_images,
                    scaled_shapes,
                    current_shapes,
                    prefix=scale_prefix,
                    verbose=verbose)

            # Scale current shapes to next resolution, don't bother
            # scaling final level
            if j != (self.n_scales - 1):
                transform = Scale(self.scales[j + 1] / self.scales[j],
                                  n_dims=2)
                for image_shapes in current_shapes:
                    for k, shape in enumerate(image_shapes):
                        image_shapes[k] = transform.apply(shape)
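
A small sketch of the feature-reuse check above: holistic features are
compared by identity, so passing the same function object for consecutive
scales skips recomputation (`igo` and `no_op` are menpo.feature functions):

from menpo.feature import igo, no_op

holistic_features = [no_op, igo, igo]
for j in range(3):
    recompute = j == 0 or holistic_features[j] is not holistic_features[j - 1]
    print(j, recompute)  # 0 True / 1 True / 2 False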
Example #11
    def _train_batch(self, image_batch, increment=False, group=None,
                     bounding_box_group_glob=None, verbose=False):
        # Rescale to existing reference shape
        image_batch = rescale_images_to_reference_shape(
            image_batch, group, self.reference_shape,
            verbose=verbose)

        generated_bb_func = generate_perturbations_from_gt(
            image_batch, self.n_perturbations,
            self._perturb_from_gt_bounding_box, gt_group=group,
            bb_group_glob=bounding_box_group_glob, verbose=verbose)

        # for each scale (low --> high)
        current_shapes = []
        for j in range(self.n_scales):
            if verbose:
                if len(self.scales) > 1:
                    scale_prefix = '  - Scale {}: '.format(j)
                else:
                    scale_prefix = '  - '
            else:
                scale_prefix = None

            # Handle holistic features
            if j == 0 and self.holistic_features[j] == no_op:
                # Saves a lot of memory
                feature_images = image_batch
            elif j == 0 or self.holistic_features[j] is not self.holistic_features[j - 1]:
                # Compute features only if this is the first pass through
                # the loop or the features at this scale are different from
                # the features at the previous scale
                feature_images = compute_features(image_batch,
                                                  self.holistic_features[j],
                                                  prefix=scale_prefix,
                                                  verbose=verbose)
            # handle scales
            if self.scales[j] != 1:
                # Scale feature images only if scale is different than 1
                scaled_images = scale_images(feature_images, self.scales[j],
                                             prefix=scale_prefix,
                                             verbose=verbose)
            else:
                scaled_images = feature_images

            # Extract scaled ground truth shapes for current scale
            scaled_shapes = [i.landmarks[group].lms for i in scaled_images]

            if j == 0:
                msg = '{}Aligning reference shape with bounding boxes.'.format(
                    scale_prefix)
                wrap = partial(print_progress, prefix=msg,
                               end_with_newline=False, verbose=verbose)

                # Extract perturbations at the very bottom level
                for ii in wrap(scaled_images):
                    c_shapes = []
                    for bbox in generated_bb_func(ii):
                        c_s = align_shape_with_bounding_box(
                            self.reference_shape, bbox)
                        c_shapes.append(c_s)
                    current_shapes.append(c_shapes)

            # train supervised descent algorithm
            if not increment:
                current_shapes = self.algorithms[j].train(
                    scaled_images, scaled_shapes, current_shapes,
                    prefix=scale_prefix, verbose=verbose)
            else:
                current_shapes = self.algorithms[j].increment(
                    scaled_images, scaled_shapes, current_shapes,
                    prefix=scale_prefix, verbose=verbose)

            # Scale current shapes to next resolution, don't bother
            # scaling final level
            if j != (self.n_scales - 1):
                transform = Scale(self.scales[j + 1] / self.scales[j],
                                  n_dims=2)
                for image_shapes in current_shapes:
                    for k, shape in enumerate(image_shapes):
                        image_shapes[k] = transform.apply(shape)
Example #12
    def _train_batch(self, template, shape_batch, increment=False, group=None,
                     shape_forgetting_factor=1.0, verbose=False):
        r"""
        Builds an Active Template Model from a list of landmarked images.
        """
        # build models at each scale
        if verbose:
            print_dynamic('- Building models\n')

        feature_images = []
        # for each scale (low --> high)
        for j in range(self.n_scales):
            if verbose:
                if len(self.scales) > 1:
                    scale_prefix = '  - Scale {}: '.format(j)
                else:
                    scale_prefix = '  - '
            else:
                scale_prefix = None

            # Handle features
            if j == 0 or self.holistic_features[j] is not self.holistic_features[j - 1]:
                # Compute features only if this is the first pass through
                # the loop or the features at this scale are different from
                # the features at the previous scale
                feature_images = compute_features([template],
                                                  self.holistic_features[j],
                                                  prefix=scale_prefix,
                                                  verbose=verbose)
            # handle scales
            if self.scales[j] != 1:
                # Scale feature images only if scale is different than 1
                scaled_images = scale_images(feature_images, self.scales[j],
                                             prefix=scale_prefix,
                                             verbose=verbose)
                # Extract potentially rescaled shapes
                scale_transform = Scale(scale_factor=self.scales[j],
                                        n_dims=2)
                scale_shapes = [scale_transform.apply(s)
                                for s in shape_batch]
            else:
                scaled_images = feature_images
                scale_shapes = shape_batch

            # Build the shape model
            if verbose:
                print_dynamic('{}Building shape model'.format(scale_prefix))

            if not increment:
                if j == 0:
                    shape_model = self._build_shape_model(scale_shapes, j)
                    self.shape_models.append(shape_model)
                else:
                    self.shape_models.append(deepcopy(shape_model))
            else:
                self._increment_shape_model(
                    scale_shapes, self.shape_models[j],
                    forgetting_factor=shape_forgetting_factor)

            # Obtain warped images - we use a scaled version of the
            # reference shape, computed here. This is because the mean
            # moves when we are incrementing, and we need a consistent
            # reference frame.
            scaled_reference_shape = Scale(self.scales[j], n_dims=2).apply(
                self.reference_shape)
            warped_template = self._warp_template(scaled_images[0], group,
                                                  scaled_reference_shape,
                                                  j, scale_prefix, verbose)
            self.warped_templates.append(warped_template[0])

            if verbose:
                print_dynamic('{}Done\n'.format(scale_prefix))

        # Because we just copy the shape model, we need to wait to trim
        # it after building each model. This ensures we can have a different
        # number of components per level
        for j, sm in enumerate(self.shape_models):
            max_sc = self.max_shape_components[j]
            if max_sc is not None:
                sm.trim_components(max_sc)
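
A hedged sketch of the deferred trimming, using menpo's `PCAModel` directly
(the shapes are random stand-ins): each level may keep a different number of
components, which is why the copied models are only trimmed once every level
has been built:

import numpy as np
from menpo.model import PCAModel
from menpo.shape import PointCloud

shapes = [PointCloud(np.random.random([10, 2])) for _ in range(8)]
sm = PCAModel(shapes)
print(sm.n_active_components)  # at most n_samples - 1, i.e. 7 here
sm.trim_components(3)
print(sm.n_active_components)  # 3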
Example #13
    def _train(self, original_images, group=None, bounding_box_group_glob=None,
               verbose=False):
        # Dlib does not support incremental builds, so we must be passed a list
        if not isinstance(original_images, list):
            original_images = list(original_images)
        # We use temporary landmark groups - so we need the group key to not be
        # None
        if group is None:
            group = original_images[0].landmarks.group_labels[0]

        # Temporarily store all the bounding boxes for rescaling
        for i in original_images:
            i.landmarks['__gt_bb'] = i.landmarks[group].lms.bounding_box()

        if self.reference_shape is None:
            # If no reference shape was given, use the mean of the first batch
            self.reference_shape = compute_reference_shape(
                [i.landmarks['__gt_bb'].lms for i in original_images],
                self.diagonal, verbose=verbose)

        # Rescale to existing reference shape
        images = rescale_images_to_reference_shape(
            original_images, '__gt_bb', self.reference_shape,
            verbose=verbose)

        # Scaling is done - remove temporary gt bounding boxes
        for i, i2 in zip(original_images, images):
            del i.landmarks['__gt_bb']
            del i2.landmarks['__gt_bb']

        generated_bb_func = generate_perturbations_from_gt(
            images, self.n_perturbations, self._perturb_from_gt_bounding_box,
            gt_group=group, bb_group_glob=bounding_box_group_glob,
            verbose=verbose)

        # for each scale (low --> high)
        current_bounding_boxes = []
        for j in range(self.n_scales):
            if verbose:
                if len(self.scales) > 1:
                    scale_prefix = '  - Scale {}: '.format(j)
                else:
                    scale_prefix = '  - '
            else:
                scale_prefix = None

            # handle scales
            if self.scales[j] != 1:
                # Scale feature images only if scale is different than 1
                scaled_images = scale_images(images, self.scales[j],
                                             prefix=scale_prefix,
                                             verbose=verbose)
            else:
                scaled_images = images

            if j == 0:
                current_bounding_boxes = [generated_bb_func(im)
                                          for im in scaled_images]

            # Extract scaled ground truth shapes for current scale
            scaled_gt_shapes = [i.landmarks[group].lms for i in scaled_images]

            # Train the Dlib model
            current_bounding_boxes = self.algorithms[j].train(
                scaled_images, scaled_gt_shapes, current_bounding_boxes,
                prefix=scale_prefix, verbose=verbose)

            # Scale current shapes to next resolution, don't bother
            # scaling final level
            if j != (self.n_scales - 1):
                transform = Scale(self.scales[j + 1] / self.scales[j],
                                  n_dims=2)
                for bboxes in current_bounding_boxes:
                    for k, bb in enumerate(bboxes):
                        bboxes[k] = transform.apply(bb)
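
A sketch of the temporary '__gt_bb' landmarks above: menpo shapes expose a
bounding_box() method that returns the axis-aligned box as a shape, so it
can itself be attached as landmarks and rescaled along with the image:

import numpy as np
from menpo.shape import PointCloud

shape = PointCloud(np.array([[10., 20.], [30., 25.], [15., 60.]]))
bb = shape.bounding_box()
print(bb.points)  # the four corners of the axis-aligned bounding box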
Example #14
    def _train(self,
               original_images,
               group=None,
               bounding_box_group_glob=None,
               verbose=False):
        r"""
        """
        # Dlib does not support incremental builds, so we must be passed a list
        if not isinstance(original_images, list):
            original_images = list(original_images)
        # We use temporary landmark groups - so we need the group key to not be
        # None
        if group is None:
            group = original_images[0].landmarks.group_labels[0]

        # Temporarily store all the bounding boxes for rescaling
        for i in original_images:
            i.landmarks['__gt_bb'] = i.landmarks[group].lms.bounding_box()

        if self.reference_shape is None:
            # If no reference shape was given, use the mean of the first batch
            self.reference_shape = compute_reference_shape(
                [i.landmarks['__gt_bb'].lms for i in original_images],
                self.diagonal,
                verbose=verbose)

        # Rescale to existing reference shape
        images = rescale_images_to_reference_shape(original_images,
                                                   '__gt_bb',
                                                   self.reference_shape,
                                                   verbose=verbose)

        # Scaling is done - remove temporary gt bounding boxes
        for i, i2 in zip(original_images, images):
            del i.landmarks['__gt_bb']
            del i2.landmarks['__gt_bb']

        generated_bb_func = generate_perturbations_from_gt(
            images,
            self.n_perturbations,
            self._perturb_from_gt_bounding_box,
            gt_group=group,
            bb_group_glob=bounding_box_group_glob,
            verbose=verbose)

        # for each scale (low --> high)
        current_bounding_boxes = []
        for j in range(self.n_scales):
            if verbose:
                if len(self.scales) > 1:
                    scale_prefix = '  - Scale {}: '.format(j)
                else:
                    scale_prefix = '  - '
            else:
                scale_prefix = None

            # handle scales
            if self.scales[j] != 1:
                # Scale feature images only if scale is different than 1
                scaled_images = scale_images(images,
                                             self.scales[j],
                                             prefix=scale_prefix,
                                             verbose=verbose)
            else:
                scaled_images = images

            if j == 0:
                current_bounding_boxes = [
                    generated_bb_func(im) for im in scaled_images
                ]

            # Extract scaled ground truth shapes for current scale
            scaled_gt_shapes = [i.landmarks[group].lms for i in scaled_images]

            # Train the Dlib model
            current_bounding_boxes = self.algorithms[j].train(
                scaled_images,
                scaled_gt_shapes,
                current_bounding_boxes,
                prefix=scale_prefix,
                verbose=verbose)

            # Scale current shapes to next resolution, don't bother
            # scaling final level
            if j != (self.n_scales - 1):
                transform = Scale(self.scales[j + 1] / self.scales[j],
                                  n_dims=2)
                for bboxes in current_bounding_boxes:
                for k, bb in enumerate(bboxes):
                    bboxes[k] = transform.apply(bb)
Example #15
    def _train_batch(
        self, template, shape_batch, increment=False, group=None, shape_forgetting_factor=1.0, verbose=False
    ):
        r"""
        Builds an Active Template Model from a list of landmarked images.
        """
        # build models at each scale
        if verbose:
            print_dynamic("- Building models\n")

        feature_images = []
        # for each scale (low --> high)
        for j in range(self.n_scales):
            if verbose:
                if len(self.scales) > 1:
                    scale_prefix = "  - Scale {}: ".format(j)
                else:
                    scale_prefix = "  - "
            else:
                scale_prefix = None

            # Handle features
            if j == 0 or self.holistic_features[j] is not self.holistic_features[j - 1]:
                # Compute features only if this is the first pass through
                # the loop or the features at this scale are different from
                # the features at the previous scale
                feature_images = compute_features(
                    [template], self.holistic_features[j], prefix=scale_prefix, verbose=verbose
                )
            # handle scales
            if self.scales[j] != 1:
                # Scale feature images only if scale is different than 1
                scaled_images = scale_images(feature_images, self.scales[j], prefix=scale_prefix, verbose=verbose)
                # Extract potentially rescaled shapes
                scale_transform = Scale(scale_factor=self.scales[j], n_dims=2)
                scale_shapes = [scale_transform.apply(s) for s in shape_batch]
            else:
                scaled_images = feature_images
                scale_shapes = shape_batch

            # Build the shape model
            if verbose:
                print_dynamic("{}Building shape model".format(scale_prefix))

            if not increment:
                shape_model = self._build_shape_model(scale_shapes, j)
                self.shape_models.append(shape_model)
            else:
                self._increment_shape_model(scale_shapes, j, forgetting_factor=shape_forgetting_factor)

            # Obtain warped images - we use a scaled version of the
            # reference shape, computed here. This is because the mean
            # moves when we are incrementing, and we need a consistent
            # reference frame.
            scaled_reference_shape = Scale(self.scales[j], n_dims=2).apply(self.reference_shape)
            warped_template = self._warp_template(
                scaled_images[0], group, scaled_reference_shape, j, scale_prefix, verbose
            )
            self.warped_templates.append(warped_template[0])

            if verbose:
                print_dynamic("{}Done\n".format(scale_prefix))
Example #16
    def _train_batch(self, template, shape_batch, increment=False, group=None,
                     shape_forgetting_factor=1.0, verbose=False):
        # build models at each scale
        if verbose:
            print_dynamic('- Building models\n')

        feature_images = []
        # for each scale (low --> high)
        for j in range(self.n_scales):
            if verbose:
                if len(self.scales) > 1:
                    scale_prefix = '  - Scale {}: '.format(j)
                else:
                    scale_prefix = '  - '
            else:
                scale_prefix = None

            # Handle features
            if j == 0 or self.holistic_features[j] is not self.holistic_features[j - 1]:
                # Compute features only if this is the first pass through
                # the loop or the features at this scale are different from
                # the features at the previous scale
                feature_images = compute_features([template],
                                                  self.holistic_features[j],
                                                  prefix=scale_prefix,
                                                  verbose=verbose)
            # handle scales
            if self.scales[j] != 1:
                # Scale feature images only if scale is different than 1
                scaled_images = scale_images(feature_images, self.scales[j],
                                             prefix=scale_prefix,
                                             verbose=verbose)
                # Extract potentially rescaled shapes
                scale_transform = Scale(scale_factor=self.scales[j],
                                        n_dims=2)
                scale_shapes = [scale_transform.apply(s)
                                for s in shape_batch]
            else:
                scaled_images = feature_images
                scale_shapes = shape_batch

            # Build the shape model
            if verbose:
                print_dynamic('{}Building shape model'.format(scale_prefix))

            if not increment:
                shape_model = self._build_shape_model(scale_shapes, j)
                self.shape_models.append(shape_model)
            else:
                self._increment_shape_model(
                    scale_shapes, j, forgetting_factor=shape_forgetting_factor)

            # Obtain warped images - we use a scaled version of the
            # reference shape, computed here. This is because the mean
            # moves when we are incrementing, and we need a consistent
            # reference frame.
            scaled_reference_shape = Scale(self.scales[j], n_dims=2).apply(
                self.reference_shape)
            warped_template = self._warp_template(scaled_images[0], group,
                                                  scaled_reference_shape,
                                                  j, scale_prefix, verbose)
            self.warped_templates.append(warped_template[0])

            if verbose:
                print_dynamic('{}Done\n'.format(scale_prefix))
Example #17
    def build(self, shapes, template, group=None, label=None, verbose=False):
        r"""
        Builds a Multilevel Active Template Model given a list of shapes and a
        template image.

        Parameters
        ----------
        shapes : list of :map:`PointCloud`
            The set of shapes from which to build the shape model of the ATM.

        template : :map:`Image` or subclass
            The image to be used as template.

        group : `string`, optional
            The key of the landmark set of the template that should be used. If
            ``None``, and if there is only one set of landmarks, this set will
            be used.

        label : `string`, optional
            The label of the landmark manager of the template that you wish to
            use. If ``None`` is passed, the convex hull of all landmarks is
            used.

        verbose : `boolean`, optional
            Flag that controls information and progress printing.

        Returns
        -------
        atm : :map:`ATM`
            The ATM object. Shape and appearance models are stored from lowest
            to highest level.
        """
        # compute reference_shape
        self.reference_shape = compute_reference_shape(
            shapes, self.normalization_diagonal, verbose=verbose)

        # normalize the template size using the reference_shape scaling
        if verbose:
            print_dynamic('- Normalizing template size')
        normalized_template = template.rescale_to_reference_shape(
            self.reference_shape, group=group, label=label)

        # create pyramid for template image
        if verbose:
            print_dynamic('- Creating template pyramid')
        generator = create_pyramid([normalized_template], self.n_levels,
                                   self.downscale, self.features)

        # build the model at each pyramid level
        if verbose:
            if self.n_levels > 1:
                print_dynamic('- Building model for each of the {} pyramid '
                              'levels\n'.format(self.n_levels))
            else:
                print_dynamic('- Building model\n')

        shape_models = []
        warped_templates = []
        # for each pyramid level (high --> low)
        for j in range(self.n_levels):
            # since models are built from highest to lowest level, the
            # parameters in form of list need to use a reversed index
            rj = self.n_levels - j - 1

            if verbose:
                level_str = '  - '
                if self.n_levels > 1:
                    level_str = '  - Level {}: '.format(j + 1)

            # rescale shapes if required
            if j > 0 and self.scaled_shape_models:
                scale_transform = Scale(scale_factor=1.0 / self.downscale,
                                        n_dims=2)
                shapes = [scale_transform.apply(s) for s in shapes]

            # train shape model and find reference frame
            if verbose:
                print_dynamic('{}Building shape model'.format(level_str))
            shape_model = build_shape_model(shapes,
                                            self.max_shape_components[rj])
            reference_frame = self._build_reference_frame(shape_model.mean())

            # add shape model to the list
            shape_models.append(shape_model)

            # get template's feature image of current level
            if verbose:
                print_dynamic('{}Warping template'.format(level_str))
            feature_template = next(generator[0])

            # compute transform
            transform = self.transform(reference_frame.landmarks['source'].lms,
                                       feature_template.landmarks[group][label])

            # warp template to reference frame
            warped_templates.append(
                feature_template.warp_to_mask(reference_frame.mask, transform))

            # attach reference_frame to template's source shape
            warped_templates[j].landmarks['source'] = \
                reference_frame.landmarks['source']

            if verbose:
                print_dynamic('{}Done\n'.format(level_str))

        # reverse the list of shape and appearance models so that they are
        # ordered from lower to higher resolution
        shape_models.reverse()
        warped_templates.reverse()
        n_training_shapes = len(shapes)

        return self._build_atm(shape_models, warped_templates,
                               n_training_shapes)
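
Worked indices for the reversed-parameter convention above: with n_levels=3
the pyramid is traversed j = 0, 1, 2 from highest to lowest resolution, so
the per-level parameters (stored low --> high) are read at rj = 2, 1, 0:

n_levels = 3
print([n_levels - j - 1 for j in range(n_levels)])  # [2, 1, 0]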