Example #1
    def _warp_images(self, images, shapes, reference_shape, scale_index,
                     prefix, verbose):
        scaled_images = scale_images(images,
                                     self.scales[scale_index],
                                     prefix=prefix,
                                     verbose=verbose)
        warped, landmark_mapping, t_images = warp_images(
            scaled_images, self.reference_frame, self.transforms,
            self.scales[scale_index])
        if self.scales[scale_index] >= 1.0:
            self.mapping = [landmark_mapping, t_images]
        return warped
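warp_images resamples every training image into the reference frame so that corresponding pixels line up before any appearance modelling. A very reduced nearest-neighbour sketch of that idea (menpofit actually uses piecewise-affine warps defined by the landmark triangulation; `warp_to_reference` is an illustrative name, not menpofit API):

import numpy as np

def warp_to_reference(image, transform, out_shape):
    # Sample `image` at the transformed coordinates of each reference-frame
    # pixel (nearest-neighbour, coordinates clipped to the image bounds).
    rows, cols = np.indices(out_shape)
    coords = np.stack([rows.ravel(), cols.ravel()], axis=1)
    src = np.rint(transform(coords)).astype(int)
    src[:, 0] = np.clip(src[:, 0], 0, image.shape[0] - 1)
    src[:, 1] = np.clip(src[:, 1], 0, image.shape[1] - 1)
    return image[src[:, 0], src[:, 1]].reshape(out_shape)

img = np.arange(16.0).reshape(4, 4)
# Identity "transform": the warp returns the top-left 2x2 crop.
print(warp_to_reference(img, lambda c: c, (2, 2)))   # [[0. 1.] [4. 5.]]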
Example #2
    def _train(self, images, group=None, verbose=False):
        checks.check_landmark_trilist(images[0], self.transform, group=group)
        self.reference_shape = compute_reference_shape(
            [i.landmarks[group] for i in images],
            self.diagonal,
            verbose=verbose)

        # normalize images
        images = rescale_images_to_reference_shape(images,
                                                   group,
                                                   self.reference_shape,
                                                   verbose=verbose)
        if self.sigma:
            images = [fsmooth(i, self.sigma) for i in images]

        # Build models at each scale
        if verbose:
            print_dynamic('- Building models\n')

        feature_images = []
        # for each scale (low --> high)
        for j in range(self.n_scales):
            if verbose:
                if len(self.scales) > 1:
                    scale_prefix = '  - Scale {}: '.format(j)
                else:
                    scale_prefix = '  - '
            else:
                scale_prefix = None

            # Handle holistic features
            if j == 0 and self.holistic_features[j] == no_op:
                # Saves a lot of memory
                feature_images = images
            elif (j == 0 or self.holistic_features[j] is not
                  self.holistic_features[j - 1]):
                # Compute features only if this is the first pass through
                # the loop or the features at this scale are different from
                # the features at the previous scale
                feature_images = compute_features(images,
                                                  self.holistic_features[j],
                                                  prefix=scale_prefix,
                                                  verbose=verbose)
            # handle scales
            if self.scales[j] != 1:
                # Scale feature images only if scale is different than 1
                scaled_images = scale_images(feature_images,
                                             self.scales[j],
                                             prefix=scale_prefix,
                                             verbose=verbose)
            else:
                scaled_images = feature_images

            # Extract potentially rescaled shapes
            scale_shapes = [i.landmarks[group] for i in scaled_images]

            # Build the shape model
            if verbose:
                print_dynamic('{}Building shape model'.format(scale_prefix))

            shape_model = self._build_shape_model(scale_shapes, j)
            self.shape_models.append(shape_model)

            # Obtain warped images - we use a scaled version of the
            # reference shape, computed here. This is because the mean
            # moves when we are incrementing, and we need a consistent
            # reference frame.
            scaled_reference_shape = Scale(self.scales[j], n_dims=2).apply(
                self.reference_shape)
            warped_images = self._warp_images(scaled_images, scale_shapes,
                                              scaled_reference_shape, j,
                                              scale_prefix, verbose)

            # obtain appearance model
            if verbose:
                print_dynamic(
                    '{}Building appearance model'.format(scale_prefix))

            appearance_model = PCAModel(warped_images)
            # trim appearance model if required
            if self.max_appearance_components[j] is not None:
                appearance_model.trim_components(
                    self.max_appearance_components[j])
            # add appearance model to the list
            self.appearance_models.append(appearance_model)

            expert_ensemble = self.expert_ensemble_cls[j](
                images=scaled_images,
                shapes=scale_shapes,
                patch_shape=self.patch_shape[j],
                patch_normalisation=self.patch_normalisation,
                cosine_mask=self.cosine_mask,
                context_shape=self.context_shape[j],
                sample_offsets=self.sample_offsets,
                prefix=scale_prefix,
                verbose=verbose)
            self.expert_ensembles.append(expert_ensemble)

            if verbose:
                print_dynamic('{}Done\n'.format(scale_prefix))
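A note on the feature-handling branch above: features are recomputed only when the feature callable changes between consecutive scales, and the raw images are reused outright when the first scale uses no_op. A minimal, self-contained sketch of that caching pattern (cached_features and extract are illustrative names, not menpofit API):

def no_op(x):
    return x

def cached_features(images, feature_fns, extract):
    # Yield the feature images for each scale, recomputing only when the
    # feature callable differs from the previous scale's.
    feats = None
    for j, fn in enumerate(feature_fns):
        if j == 0 and fn is no_op:
            feats = images                # reuse the inputs: saves memory
        elif j == 0 or fn is not feature_fns[j - 1]:
            feats = [extract(im, fn) for im in images]
        yield feats                       # unchanged scales reuse the same list

# Two scales sharing one feature callable -> a single feature pass.
per_scale = list(cached_features([1, -2], [abs, abs], lambda im, f: f(im)))
assert per_scale[0] is per_scale[1]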
Example #3
    def _train_batch(
        self, template, shape_batch, increment=False, group=None, shape_forgetting_factor=1.0, verbose=False
    ):
        r"""
        Builds an Active Template Model from a list of landmarked images.
        """
        # build models at each scale
        if verbose:
            print_dynamic("- Building models\n")

        feature_images = []
        # for each scale (low --> high)
        for j in range(self.n_scales):
            if verbose:
                if len(self.scales) > 1:
                    scale_prefix = "  - Scale {}: ".format(j)
                else:
                    scale_prefix = "  - "
            else:
                scale_prefix = None

            # Handle features
            if j == 0 or self.holistic_features[j] is not self.holistic_features[j - 1]:
                # Compute features only if this is the first pass through
                # the loop or the features at this scale are different from
                # the features at the previous scale
                feature_images = compute_features(
                    [template], self.holistic_features[j], prefix=scale_prefix, verbose=verbose
                )
            # handle scales
            if self.scales[j] != 1:
                # Scale feature images only if scale is different than 1
                scaled_images = scale_images(feature_images, self.scales[j], prefix=scale_prefix, verbose=verbose)
                # Extract potentially rescaled shapes
                scale_transform = Scale(scale_factor=self.scales[j], n_dims=2)
                scale_shapes = [scale_transform.apply(s) for s in shape_batch]
            else:
                scaled_images = feature_images
                scale_shapes = shape_batch

            # Build the shape model
            if verbose:
                print_dynamic("{}Building shape model".format(scale_prefix))

            if not increment:
                shape_model = self._build_shape_model(scale_shapes, j)
                self.shape_models.append(shape_model)
            else:
                self._increment_shape_model(scale_shapes, j, forgetting_factor=shape_forgetting_factor)

            # Obtain warped images - we use a scaled version of the
            # reference shape, computed here. This is because the mean
            # moves when we are incrementing, and we need a consistent
            # reference frame.
            scaled_reference_shape = Scale(self.scales[j], n_dims=2).apply(self.reference_shape)
            warped_template = self._warp_template(
                scaled_images[0], group, scaled_reference_shape, j, scale_prefix, verbose
            )
            self.warped_templates.append(warped_template[0])

            if verbose:
                print_dynamic("{}Done\n".format(scale_prefix))
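The Scale transform used above to bring shapes into each scale's coordinate frame can be pictured as a uniform multiplication of point coordinates. A pure-numpy stand-in for the 2D case (menpo's actual Scale also supports per-axis factors and composition with other transforms):

import numpy as np

class UniformScale2D:
    # Illustrative stand-in for menpo.transform.Scale with n_dims=2.
    def __init__(self, scale_factor):
        self.factor = scale_factor

    def apply(self, points):
        # points: (n_points, 2) array of landmark coordinates
        return np.asarray(points, dtype=float) * self.factor

shape = [[10.0, 20.0], [30.0, 40.0]]
print(UniformScale2D(0.5).apply(shape))   # [[ 5. 10.] [15. 20.]]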
Example #4
    def _train_batch(self, image_batch, increment=False, group=None,
                     bounding_box_group_glob=None, verbose=False):
        # Rescale images wrt the scale factor between the existing
        # reference_shape and their ground truth (group) shapes
        image_batch = rescale_images_to_reference_shape(
            image_batch, group, self.reference_shape,
            verbose=verbose)

        # Create a callable that generates perturbations of the bounding boxes
        # of the provided images.
        generated_bb_func = generate_perturbations_from_gt(
            image_batch, self.n_perturbations,
            self._perturb_from_gt_bounding_box, gt_group=group,
            bb_group_glob=bounding_box_group_glob, verbose=verbose)

        # For each scale (low --> high)
        for j in range(self.n_scales):
            # Print progress if asked
            if verbose:
                if len(self.scales) > 1:
                    scale_prefix = '  - Scale {}: '.format(j)
                else:
                    scale_prefix = '  - '
            else:
                scale_prefix = None

            # Extract features. Features are extracted only if we are at the
            # first scale or if the features of the current scale are different
            # than the ones extracted at the previous scale.
            if j == 0 and self.holistic_features[j] == no_op:
                # Saves a lot of memory
                feature_images = image_batch
            elif (j == 0 or
                  self.holistic_features[j] != self.holistic_features[j - 1]):
                # Compute features only if this is the first pass through
                # the loop or the features at this scale are different from
                # the features at the previous scale
                feature_images = compute_features(image_batch,
                                                  self.holistic_features[j],
                                                  prefix=scale_prefix,
                                                  verbose=verbose)

            # Rescale images according to scales. Note that scale_images is
            # smart enough not to rescale the images if the current scale
            # factor equals 1.
            scaled_images, scale_transforms = scale_images(
                feature_images, self.scales[j], prefix=scale_prefix,
                return_transforms=True, verbose=verbose)

            # Extract scaled ground truth shapes for current scale
            scaled_shapes = [i.landmarks[group] for i in scaled_images]

            # Get shape estimations for the current scale. At the first
            # scale, this is done by aligning the reference shape with the
            # perturbed bounding boxes. At the remaining scales, the current
            # shapes are attached to the scaled_images under the key
            # '__sdm_current_shape_{}'.
            current_shapes = []
            if j == 0:
                # At the first scale, the current shapes are created by aligning
                # the reference shape to the perturbed bounding boxes.
                msg = '{}Aligning reference shape with bounding boxes.'.format(
                    scale_prefix)
                wrap = partial(print_progress, prefix=msg,
                               end_with_newline=False, verbose=verbose)
                # Extract perturbations at the very bottom level
                for ii in wrap(scaled_images):
                    c_shapes = []
                    for bbox in generated_bb_func(ii):
                        c_s = align_shape_with_bounding_box(
                            self.reference_shape, bbox)
                        c_shapes.append(c_s)
                    current_shapes.append(c_shapes)
            else:
                # At the rest of the scales, extract the current shapes that
                # were attached to the images
                msg = '{}Extracting shape estimations from previous ' \
                      'scale.'.format(scale_prefix)
                wrap = partial(print_progress, prefix=msg,
                               end_with_newline=False, verbose=verbose)
                for ii in wrap(scaled_images):
                    c_shapes = []
                    for k in list(range(self.n_perturbations)):
                        c_key = '__sdm_current_shape_{}'.format(k)
                        c_shapes.append(ii.landmarks[c_key])
                    current_shapes.append(c_shapes)

            # Train supervised descent algorithm. This returns the shape
            # estimations for the next scale.
            if not increment:
                current_shapes = self.algorithms[j].train(
                    scaled_images, scaled_shapes, current_shapes,
                    prefix=scale_prefix, verbose=verbose)
            else:
                current_shapes = self.algorithms[j].increment(
                    scaled_images, scaled_shapes, current_shapes,
                    prefix=scale_prefix, verbose=verbose)

            # Scale the current shape estimations for the next level. This
            # doesn't have to be done for the last scale. The only thing we need
            # to do at the last scale is to remove any attached landmarks from
            # the training images.
            if j < (self.n_scales - 1):
                if self.holistic_features[j + 1] != self.holistic_features[j]:
                    # Features will be extracted, so attach current_shapes to
                    # the training images (image_batch)
                    for jj, image_shapes in enumerate(current_shapes):
                        for k, shape in enumerate(image_shapes):
                            c_key = '__sdm_current_shape_{}'.format(k)
                            image_batch[jj].landmarks[c_key] = \
                                scale_transforms[jj].apply(shape)
                else:
                    # Features won't be extracted; the same feature_images will
                    # be used for the next scale, so attach current_shapes to
                    # them.
                    for jj, image_shapes in enumerate(current_shapes):
                        for k, shape in enumerate(image_shapes):
                            c_key = '__sdm_current_shape_{}'.format(k)
                            feature_images[jj].landmarks[c_key] = \
                                scale_transforms[jj].apply(shape)
            else:
                # Check if original training image (image_batch) got some current
                # shape estimations attached. If yes, delete them.
                if '__sdm_current_shape_0' in image_batch[0].landmarks:
                    for image in image_batch:
                        for k in list(range(self.n_perturbations)):
                            c_key = '__sdm_current_shape_{}'.format(k)
                            del image.landmarks[c_key]
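The reserved-key protocol above ('__sdm_current_shape_{}') is simply a way of carrying per-perturbation state between scales on the images themselves. A toy sketch, with landmarks modelled as a plain dict rather than menpo's LandmarkManager:

KEY = '__sdm_current_shape_{}'

def attach(images, shapes_per_image):
    # Stash the k-th shape estimate of each image under a reserved key.
    for im, shapes in zip(images, shapes_per_image):
        for k, s in enumerate(shapes):
            im['landmarks'][KEY.format(k)] = s

def detach(images, n_perturbations):
    # Retrieve (and remove) the stashed estimates at the next scale.
    return [[im['landmarks'].pop(KEY.format(k))
             for k in range(n_perturbations)] for im in images]

imgs = [{'landmarks': {}}]
attach(imgs, [['s0', 's1']])
assert detach(imgs, 2) == [['s0', 's1']] and imgs[0]['landmarks'] == {}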
Example #5
    def _train_batch(self,
                     image_batch,
                     increment=False,
                     group=None,
                     bounding_box_group_glob=None,
                     verbose=False):
        # Rescale to existing reference shape
        image_batch = rescale_images_to_reference_shape(image_batch,
                                                        group,
                                                        self.reference_shape,
                                                        verbose=verbose)

        generated_bb_func = generate_perturbations_from_gt(
            image_batch,
            self.n_perturbations,
            self._perturb_from_gt_bounding_box,
            gt_group=group,
            bb_group_glob=bounding_box_group_glob,
            verbose=verbose)

        # for each scale (low --> high)
        current_shapes = []
        for j in range(self.n_scales):
            if verbose:
                if len(self.scales) > 1:
                    scale_prefix = '  - Scale {}: '.format(j)
                else:
                    scale_prefix = '  - '
            else:
                scale_prefix = None

            # Handle holistic features
            if j == 0 and self.holistic_features[j] == no_op:
                # Saves a lot of memory
                feature_images = image_batch
            elif (j == 0 or self.holistic_features[j] is not
                  self.holistic_features[j - 1]):
                # Compute features only if this is the first pass through
                # the loop or the features at this scale are different from
                # the features at the previous scale
                feature_images = compute_features(image_batch,
                                                  self.holistic_features[j],
                                                  prefix=scale_prefix,
                                                  verbose=verbose)
            # handle scales
            if self.scales[j] != 1:
                # Scale feature images only if scale is different than 1
                scaled_images = scale_images(feature_images,
                                             self.scales[j],
                                             prefix=scale_prefix,
                                             verbose=verbose)
            else:
                scaled_images = feature_images

            # Extract scaled ground truth shapes for current scale
            scaled_shapes = [i.landmarks[group].lms for i in scaled_images]

            if j == 0:
                msg = '{}Aligning reference shape with bounding boxes.'.format(
                    scale_prefix)
                wrap = partial(print_progress,
                               prefix=msg,
                               end_with_newline=False,
                               verbose=verbose)

                # Extract perturbations at the very bottom level
                for ii in wrap(scaled_images):
                    c_shapes = []
                    for bbox in generated_bb_func(ii):
                        c_s = align_shape_with_bounding_box(
                            self.reference_shape, bbox)
                        c_shapes.append(c_s)
                    current_shapes.append(c_shapes)

            # train supervised descent algorithm
            if not increment:
                current_shapes = self.algorithms[j].train(scaled_images,
                                                          scaled_shapes,
                                                          current_shapes,
                                                          prefix=scale_prefix,
                                                          verbose=verbose)
            else:
                current_shapes = self.algorithms[j].increment(
                    scaled_images,
                    scaled_shapes,
                    current_shapes,
                    prefix=scale_prefix,
                    verbose=verbose)

            # Scale current shapes to next resolution, don't bother
            # scaling final level
            if j != (self.n_scales - 1):
                transform = Scale(self.scales[j + 1] / self.scales[j],
                                  n_dims=2)
                for image_shapes in current_shapes:
                    for k, shape in enumerate(image_shapes):
                        image_shapes[k] = transform.apply(shape)
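The closing loop above carries the shape estimates to the next pyramid level. Because every level's images are derived from the originals by scales[j], moving a shape from level j to level j + 1 needs the ratio of the two factors, not the absolute factor. A minimal numeric check (to_next_level is an illustrative name):

import numpy as np

def to_next_level(shapes, scales, j):
    # shapes: list of (n_points, 2) arrays in level-j coordinates
    factor = scales[j + 1] / scales[j]
    return [s * factor for s in shapes]

shapes = [np.array([[2.0, 4.0]])]
print(to_next_level(shapes, [0.5, 1.0], 0))   # [array([[4., 8.]])] at full res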
Example #6
    def _train_batch(self, image_batch, increment=False, group=None,
                     shape_forgetting_factor=1.0, verbose=False):
        # normalize images
        image_batch = rescale_images_to_reference_shape(
            image_batch, group, self.reference_shape, verbose=verbose)

        # build models at each scale
        if verbose:
            print_dynamic('- Training models\n')

        # for each level (low --> high)
        for i in range(self.n_scales):
            if verbose:
                if self.n_scales > 1:
                    prefix = '  - Scale {}: '.format(i)
                else:
                    prefix = '  - '
            else:
                prefix = None

            # Handle holistic features
            if i == 0 and self.holistic_features[i] == no_op:
                # Saves a lot of memory
                feature_images = image_batch
            elif i == 0 or self.holistic_features[i] is not self.holistic_features[i - 1]:
                # compute features only if this is the first pass through
                # the loop or the features at this scale are different from
                # the features at the previous scale
                feature_images = compute_features(image_batch,
                                                  self.holistic_features[i],
                                                  prefix=prefix,
                                                  verbose=verbose)
            # handle scales
            if self.scales[i] != 1:
                # scale feature images only if scale is different than 1
                scaled_images = scale_images(feature_images,
                                             self.scales[i],
                                             prefix=prefix,
                                             verbose=verbose)
            else:
                scaled_images = feature_images

            # extract scaled shapes
            scaled_shapes = [image.landmarks[group]
                             for image in scaled_images]

            # train shape model
            if verbose:
                print_dynamic('{}Training shape model'.format(prefix))

            if not increment:
                shape_model = self._build_shape_model(scaled_shapes, i)
                self.shape_models.append(shape_model)
            else:
                self._increment_shape_model(
                    scaled_shapes, i, forgetting_factor=shape_forgetting_factor)

            # train expert ensemble
            if verbose:
                print_dynamic('{}Training expert ensemble'.format(prefix))

            if increment:
                self.expert_ensembles[i].increment(scaled_images,
                                                   scaled_shapes,
                                                   prefix=prefix,
                                                   verbose=verbose)
            else:
                expert_ensemble = self.expert_ensemble_cls[i](
                        images=scaled_images, shapes=scaled_shapes,
                        patch_shape=self.patch_shape[i],
                        patch_normalisation=self.patch_normalisation,
                        cosine_mask=self.cosine_mask,
                        context_shape=self.context_shape[i],
                        sample_offsets=self.sample_offsets,
                        prefix=prefix, verbose=verbose)
                self.expert_ensembles.append(expert_ensemble)

            if verbose:
                print_dynamic('{}Done\n'.format(prefix))
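The expert ensemble trained above learns one local detector per landmark from patches sampled around the ground-truth points. A hedged sketch of the patch extraction that feeds such experts, using plain numpy slicing (menpo's own patch extraction additionally handles sub-pixel sampling and boundary padding):

import numpy as np

def extract_patches(image, points, patch_shape=(3, 3)):
    # image: (H, W); points: (n, 2) integer (row, col) landmark positions.
    # Assumes every patch lies fully inside the image (no padding here).
    ph, pw = patch_shape
    return np.stack([image[r - ph // 2: r + ph // 2 + 1,
                           c - pw // 2: c + pw // 2 + 1]
                     for r, c in points])

img = np.arange(36, dtype=float).reshape(6, 6)
print(extract_patches(img, [(2, 2), (3, 3)]).shape)   # (2, 3, 3)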
Example #7
    def _train(self, images, group=None, verbose=False):
        checks.check_landmark_trilist(images[0], self.transform, group=group)
        self.reference_shape = compute_reference_shape(
            [i.landmarks[group] for i in images],
            self.diagonal, verbose=verbose)
        
        # normalize images
        images = rescale_images_to_reference_shape(
            images, group, self.reference_shape, verbose=verbose)
        if self.sigma:
            images = [fsmooth(i, self.sigma) for i in images]

        # Build models at each scale
        if verbose:
            print_dynamic('- Building models\n')

        feature_images = []
        # for each scale (low --> high)
        for j in range(self.n_scales):
            if verbose:
                if len(self.scales) > 1:
                    scale_prefix = '  - Scale {}: '.format(j)
                else:
                    scale_prefix = '  - '
            else:
                scale_prefix = None

            # Handle holistic features
            if j == 0 and self.holistic_features[j] == no_op:
                # Saves a lot of memory
                feature_images = images
            elif j == 0 or self.holistic_features[j] is not self.holistic_features[j - 1]:
                # Compute features only if this is the first pass through
                # the loop or the features at this scale are different from
                # the features at the previous scale
                feature_images = compute_features(images,
                                                  self.holistic_features[j],
                                                  prefix=scale_prefix,
                                                  verbose=verbose)
            # handle scales
            if self.scales[j] != 1:
                # Scale feature images only if scale is different than 1
                scaled_images = scale_images(feature_images, self.scales[j],
                                             prefix=scale_prefix,
                                             verbose=verbose)
            else:
                scaled_images = feature_images

            # Extract potentially rescaled shapes
            scale_shapes = [i.landmarks[group] for i in scaled_images]

            # Build the shape model
            if verbose:
                print_dynamic('{}Building shape model'.format(scale_prefix))

            shape_model = self._build_shape_model(scale_shapes, j)
            self.shape_models.append(shape_model)

            # Obtain warped images - we use a scaled version of the
            # reference shape, computed here. This is because the mean
            # moves when we are incrementing, and we need a consistent
            # reference frame.
            scaled_reference_shape = Scale(self.scales[j], n_dims=2).apply(
                self.reference_shape)
            warped_images = self._warp_images(scaled_images, scale_shapes,
                                              scaled_reference_shape,
                                              j, scale_prefix, verbose)

            # obtain appearance model
            if verbose:
                print_dynamic('{}Building appearance model'.format(
                    scale_prefix))

            appearance_model = PCAModel(warped_images)
            # trim appearance model if required
            if self.max_appearance_components[j] is not None:
                appearance_model.trim_components(
                    self.max_appearance_components[j])
            # add appearance model to the list
            self.appearance_models.append(appearance_model)

            expert_ensemble = self.expert_ensemble_cls[j](
                images=scaled_images, shapes=scale_shapes,
                patch_shape=self.patch_shape[j],
                patch_normalisation=self.patch_normalisation,
                cosine_mask=self.cosine_mask,
                context_shape=self.context_shape[j],
                sample_offsets=self.sample_offsets,
                prefix=scale_prefix, verbose=verbose)
            self.expert_ensembles.append(expert_ensemble)

            if verbose:
                print_dynamic('{}Done\n'.format(scale_prefix))
Example #8
    def _train(self,
               original_images,
               group=None,
               bounding_box_group_glob=None,
               verbose=False):
        # Dlib does not support incremental builds, so we must be passed a list
        if not isinstance(original_images, list):
            original_images = list(original_images)
        # We use temporary landmark groups - so we need the group key to not be
        # None
        if group is None:
            group = original_images[0].landmarks.group_labels[0]

        # Temporarily store all the bounding boxes for rescaling
        for i in original_images:
            i.landmarks['__gt_bb'] = i.landmarks[group].lms.bounding_box()

        if self.reference_shape is None:
            # If no reference shape was given, use the mean of the first batch
            self._reference_shape = compute_reference_shape(
                [i.landmarks['__gt_bb'].lms for i in original_images],
                self.diagonal,
                verbose=verbose)

        # Rescale images wrt the scale factor between the existing
        # reference_shape and their ground truth (group) bboxes
        images = rescale_images_to_reference_shape(original_images,
                                                   '__gt_bb',
                                                   self.reference_shape,
                                                   verbose=verbose)

        # Scaling is done - remove temporary gt bounding boxes
        for i, i2 in zip(original_images, images):
            del i.landmarks['__gt_bb']
            del i2.landmarks['__gt_bb']

        # Create a callable that generates perturbations of the bounding boxes
        # of the provided images.
        generated_bb_func = generate_perturbations_from_gt(
            images,
            self.n_perturbations,
            self._perturb_from_gt_bounding_box,
            gt_group=group,
            bb_group_glob=bounding_box_group_glob,
            verbose=verbose)

        # For each scale (low --> high)
        for j in range(self.n_scales):
            # Print progress if asked
            if verbose:
                if len(self.scales) > 1:
                    scale_prefix = '  - Scale {}: '.format(j)
                else:
                    scale_prefix = '  - '
            else:
                scale_prefix = None

            # Rescale images according to scales. Note that scale_images is
            # smart enough not to rescale the images if the current scale
            # factor equals 1.
            scaled_images, scale_transforms = scale_images(
                images,
                self.scales[j],
                prefix=scale_prefix,
                return_transforms=True,
                verbose=verbose)

            # Get bbox estimations for the current scale. At the first scale,
            # this is done by calling generated_bb_func. At the remaining
            # scales, the current bboxes are attached to the scaled_images
            # under the key '__ert_current_bbox_{}'.
            current_bounding_boxes = []
            if j == 0:
                # At the first scale, the current bboxes are created by calling
                # generated_bb_func.
                current_bounding_boxes = [
                    generated_bb_func(im) for im in scaled_images
                ]
            else:
                # At the rest of the scales, extract the current bboxes that
                # were attached to the images
                msg = '{}Extracting bbox estimations from previous ' \
                      'scale.'.format(scale_prefix)
                wrap = partial(print_progress,
                               prefix=msg,
                               end_with_newline=False,
                               verbose=verbose)
                for ii in wrap(scaled_images):
                    c_bboxes = []
                    for k in list(range(self.n_perturbations)):
                        c_key = '__ert_current_bbox_{}'.format(k)
                        c_bboxes.append(ii.landmarks[c_key].lms)
                    current_bounding_boxes.append(c_bboxes)

            # Extract scaled ground truth shapes for current scale
            scaled_gt_shapes = [i.landmarks[group].lms for i in scaled_images]

            # Train the Dlib model.  This returns the bbox estimations for the
            # next scale.
            current_bounding_boxes = self.algorithms[j].train(
                scaled_images,
                scaled_gt_shapes,
                current_bounding_boxes,
                prefix=scale_prefix,
                verbose=verbose)

            # Scale the current bbox estimations for the next level. This
            # doesn't have to be done for the last scale. The only thing we need
            # to do at the last scale is to remove any attached landmarks from
            # the training images.
            if j < (self.n_scales - 1):
                for jj, image_bboxes in enumerate(current_bounding_boxes):
                    for k, bbox in enumerate(image_bboxes):
                        c_key = '__ert_current_bbox_{}'.format(k)
                        images[jj].landmarks[c_key] = \
                            scale_transforms[jj].apply(bbox)
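generate_perturbations_from_gt returns a callable that jitters each ground-truth bounding box, simulating detector noise so that training sees n_perturbations initialisations per image. A sketch of the idea; the Gaussian noise model below is an assumption, not menpofit's exact scheme:

import numpy as np

rng = np.random.default_rng(0)

def perturb_bbox(bbox, n, centre_std=0.05, scale_std=0.05):
    # bbox: ((y_min, x_min), (y_max, x_max)); returns n jittered copies.
    lo, hi = np.asarray(bbox, dtype=float)
    centre, half = (lo + hi) / 2, (hi - lo) / 2
    out = []
    for _ in range(n):
        c = centre + rng.normal(scale=centre_std, size=2) * 2 * half
        h = half * (1 + rng.normal(scale=scale_std))
        out.append((tuple(c - h), tuple(c + h)))
    return out

print(perturb_bbox(((0.0, 0.0), (100.0, 50.0)), n=2))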
Example #9
    def _train(self,
               original_images,
               group=None,
               bounding_box_group_glob=None,
               verbose=False):
        r"""
        """
        # Dlib does not support incremental builds, so we must be passed a list
        if not isinstance(original_images, list):
            original_images = list(original_images)
        # We use temporary landmark groups - so we need the group key to not be
        # None
        if group is None:
            group = original_images[0].landmarks.group_labels[0]

        # Temporarily store all the bounding boxes for rescaling
        for i in original_images:
            i.landmarks['__gt_bb'] = i.landmarks[group].lms.bounding_box()

        if self.reference_shape is None:
            # If no reference shape was given, use the mean of the first batch
            self.reference_shape = compute_reference_shape(
                [i.landmarks['__gt_bb'].lms for i in original_images],
                self.diagonal,
                verbose=verbose)

        # Rescale to existing reference shape
        images = rescale_images_to_reference_shape(original_images,
                                                   '__gt_bb',
                                                   self.reference_shape,
                                                   verbose=verbose)

        # Scaling is done - remove temporary gt bounding boxes
        for i, i2 in zip(original_images, images):
            del i.landmarks['__gt_bb']
            del i2.landmarks['__gt_bb']

        generated_bb_func = generate_perturbations_from_gt(
            images,
            self.n_perturbations,
            self._perturb_from_gt_bounding_box,
            gt_group=group,
            bb_group_glob=bounding_box_group_glob,
            verbose=verbose)

        # for each scale (low --> high)
        current_bounding_boxes = []
        for j in range(self.n_scales):
            if verbose:
                if len(self.scales) > 1:
                    scale_prefix = '  - Scale {}: '.format(j)
                else:
                    scale_prefix = '  - '
            else:
                scale_prefix = None

            # handle scales
            if self.scales[j] != 1:
                # Scale images only if scale is different than 1
                scaled_images = scale_images(images,
                                             self.scales[j],
                                             prefix=scale_prefix,
                                             verbose=verbose)
            else:
                scaled_images = images

            if j == 0:
                current_bounding_boxes = [
                    generated_bb_func(im) for im in scaled_images
                ]

            # Extract scaled ground truth shapes for current scale
            scaled_gt_shapes = [i.landmarks[group].lms for i in scaled_images]

            # Train the Dlib model
            current_bounding_boxes = self.algorithms[j].train(
                scaled_images,
                scaled_gt_shapes,
                current_bounding_boxes,
                prefix=scale_prefix,
                verbose=verbose)

            # Scale current bounding boxes to the next resolution; don't
            # bother scaling the final level
            if j != (self.n_scales - 1):
                transform = Scale(self.scales[j + 1] / self.scales[j],
                                  n_dims=2)
                for bboxes in current_bounding_boxes:
                    for k, bb in enumerate(bboxes):
                        bboxes[k] = transform.apply(bb)
Example #10
    def _train(self, original_images, group=None, bounding_box_group_glob=None,
               verbose=False):
        # Dlib does not support incremental builds, so we must be passed a list
        if not isinstance(original_images, list):
            original_images = list(original_images)
        # We use temporary landmark groups - so we need the group key to not be
        # None
        if group is None:
            group = original_images[0].landmarks.group_labels[0]

        # Temporarily store all the bounding boxes for rescaling
        for i in original_images:
            i.landmarks['__gt_bb'] = i.landmarks[group].bounding_box()

        if self.reference_shape is None:
            # If no reference shape was given, use the mean of the first batch
            self._reference_shape = compute_reference_shape(
                [i.landmarks['__gt_bb'] for i in original_images],
                self.diagonal, verbose=verbose)

        # Rescale images wrt the scale factor between the existing
        # reference_shape and their ground truth (group) bboxes
        images = rescale_images_to_reference_shape(
            original_images, '__gt_bb', self.reference_shape,
            verbose=verbose)

        # Scaling is done - remove temporary gt bounding boxes
        for i, i2 in zip(original_images, images):
            del i.landmarks['__gt_bb']
            del i2.landmarks['__gt_bb']

        # Create a callable that generates perturbations of the bounding boxes
        # of the provided images.
        generated_bb_func = generate_perturbations_from_gt(
            images, self.n_perturbations, self._perturb_from_gt_bounding_box,
            gt_group=group, bb_group_glob=bounding_box_group_glob,
            verbose=verbose)

        # For each scale (low --> high)
        for j in range(self.n_scales):
            # Print progress if asked
            if verbose:
                if len(self.scales) > 1:
                    scale_prefix = '  - Scale {}: '.format(j)
                else:
                    scale_prefix = '  - '
            else:
                scale_prefix = None

            # Rescale images according to scales. Note that scale_images is
            # smart enough not to rescale the images if the current scale
            # factor equals 1.
            scaled_images, scale_transforms = scale_images(
                images, self.scales[j], prefix=scale_prefix,
                return_transforms=True, verbose=verbose)

            # Get bbox estimations for the current scale. At the first scale,
            # this is done by calling generated_bb_func. At the remaining
            # scales, the current bboxes are attached to the scaled_images
            # under the key '__ert_current_bbox_{}'.
            current_bounding_boxes = []
            if j == 0:
                # At the first scale, the current bboxes are created by calling
                # generated_bb_func.
                current_bounding_boxes = [generated_bb_func(im)
                                          for im in scaled_images]
            else:
                # At the rest of the scales, extract the current bboxes that
                # were attached to the images
                msg = '{}Extracting bbox estimations from previous ' \
                      'scale.'.format(scale_prefix)
                wrap = partial(print_progress, prefix=msg,
                               end_with_newline=False, verbose=verbose)
                for ii in wrap(scaled_images):
                    c_bboxes = []
                    for k in list(range(self.n_perturbations)):
                        c_key = '__ert_current_bbox_{}'.format(k)
                        c_bboxes.append(ii.landmarks[c_key])
                    current_bounding_boxes.append(c_bboxes)

            # Extract scaled ground truth shapes for current scale
            scaled_gt_shapes = [i.landmarks[group] for i in scaled_images]

            # Train the Dlib model.  This returns the bbox estimations for the
            # next scale.
            current_bounding_boxes = self.algorithms[j].train(
                scaled_images, scaled_gt_shapes, current_bounding_boxes,
                prefix=scale_prefix, verbose=verbose)

            # Scale the current bbox estimations for the next level. This
            # doesn't have to be done for the last scale. The only thing we need
            # to do at the last scale is to remove any attached landmarks from
            # the training images.
            if j < (self.n_scales - 1):
                for jj, image_bboxes in enumerate(current_bounding_boxes):
                    for k, bbox in enumerate(image_bboxes):
                        c_key = '__ert_current_bbox_{}'.format(k)
                        images[jj].landmarks[c_key] = \
                            scale_transforms[jj].apply(bbox)
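This variant asks scale_images for the transforms it applied (return_transforms=True), so that estimates produced in the scaled frame can be carried back onto the images used at the next pass. A toy model of that bookkeeping, with the 2D scale reduced to a bare factor and the direction of the returned transform (scaled frame back to original frame) treated as an assumption:

import numpy as np

def scale_images_sketch(images, factor):
    # Stand-in for scale_images(..., return_transforms=True). The actual
    # resizing is elided; we only return, per image, a callable that maps
    # points from the scaled frame back to the original frame.
    back = [lambda pts: np.asarray(pts, dtype=float) / factor] * len(images)
    return list(images), back

_, tfs = scale_images_sketch([object()], 0.5)
print(tfs[0]([[1.0, 2.0]]))   # [[2. 4.]] in original-frame coordinates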
Example #11
    def _train_batch(self, image_batch, increment=False, group=None,
                     verbose=False, shape_forgetting_factor=1.0,
                     appearance_forgetting_factor=1.0):
        r"""
        Builds an Active Appearance Model from a list of landmarked images.

        Parameters
        ----------
        image_batch : list of :map:`MaskedImage`
            The set of landmarked images from which to build the AAM.
        group : `string`, optional
            The key of the landmark set that should be used. If ``None``,
            and if there is only one set of landmarks, this set will be used.
        verbose : `boolean`, optional
            Flag that controls information and progress printing.

        Returns
        -------
        aam : :map:`AAM`
            The AAM object. Shape and appearance models are stored from
            lowest to highest scale
        """
        # Rescale to existing reference shape
        image_batch = rescale_images_to_reference_shape(
            image_batch, group, self.reference_shape,
            verbose=verbose)

        # build models at each scale
        if verbose:
            print_dynamic('- Building models\n')

        feature_images = []
        # for each scale (low --> high)
        for j in range(self.n_scales):
            if verbose:
                if len(self.scales) > 1:
                    scale_prefix = '  - Scale {}: '.format(j)
                else:
                    scale_prefix = '  - '
            else:
                scale_prefix = None

            # Handle holistic features
            if j == 0 and self.holistic_features[j] == no_op:
                # Saves a lot of memory
                feature_images = image_batch
            elif j == 0 or self.holistic_features[j] is not self.holistic_features[j - 1]:
                # Compute features only if this is the first pass through
                # the loop or the features at this scale are different from
                # the features at the previous scale
                feature_images = compute_features(image_batch,
                                                  self.holistic_features[j],
                                                  prefix=scale_prefix,
                                                  verbose=verbose)
            # handle scales
            if self.scales[j] != 1:
                # Scale feature images only if scale is different than 1
                scaled_images = scale_images(feature_images, self.scales[j],
                                             prefix=scale_prefix,
                                             verbose=verbose)
            else:
                scaled_images = feature_images

            # Extract potentially rescaled shapes
            scale_shapes = [i.landmarks[group].lms for i in scaled_images]

            # Build the shape model
            if verbose:
                print_dynamic('{}Building shape model'.format(scale_prefix))

            if not increment:
                shape_model = self._build_shape_model(scale_shapes, j)
                self.shape_models.append(shape_model)
            else:
                self._increment_shape_model(
                    scale_shapes, j, forgetting_factor=shape_forgetting_factor)

            # Obtain warped images - we use a scaled version of the
            # reference shape, computed here. This is because the mean
            # moves when we are incrementing, and we need a consistent
            # reference frame.
            scaled_reference_shape = Scale(self.scales[j], n_dims=2).apply(
                self.reference_shape)
            warped_images = self._warp_images(scaled_images, scale_shapes,
                                              scaled_reference_shape,
                                              j, scale_prefix, verbose)

            # obtain appearance model
            if verbose:
                print_dynamic('{}Building appearance model'.format(
                    scale_prefix))

            if not increment:
                appearance_model = PCAModel(warped_images)
                # trim appearance model if required
                if self.max_appearance_components[j] is not None:
                    appearance_model.trim_components(
                        self.max_appearance_components[j])
                # add appearance model to the list
                self.appearance_models.append(appearance_model)
            else:
                # increment appearance model
                self.appearance_models[j].increment(
                    warped_images,
                    forgetting_factor=appearance_forgetting_factor)
                # trim appearance model if required
                if self.max_appearance_components[j] is not None:
                    self.appearance_models[j].trim_components(
                        self.max_appearance_components[j])

            if verbose:
                print_dynamic('{}Done\n'.format(scale_prefix))
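When increment=True, the existing appearance model is updated in place, and appearance_forgetting_factor down-weights the statistics of earlier batches so newer data dominates. A toy sketch of that weighting on a running mean (menpo's incremental PCA applies the same idea to its covariance statistics; increment_mean is an illustrative name):

import numpy as np

def increment_mean(mean, n_seen, new_samples, forgetting_factor=1.0):
    # Down-weight the old batch by the forgetting factor, then merge.
    w_old = forgetting_factor * n_seen
    new = np.asarray(new_samples, dtype=float)
    total = w_old + len(new)
    return (w_old * mean + new.sum()) / total, total

m, n = 0.0, 0
m, n = increment_mean(m, n, [1.0, 3.0])                     # plain mean: 2.0
m, n = increment_mean(m, n, [10.0], forgetting_factor=0.5)  # old batch halved
print(m)   # (0.5 * 2 * 2.0 + 10.0) / (0.5 * 2 + 1) = 6.0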
Example #12
    def _train_batch(self,
                     image_batch,
                     increment=False,
                     group=None,
                     verbose=False):
        # Rescale to existing reference shape
        image_batch = rescale_images_to_reference_shape(image_batch,
                                                        group,
                                                        self.reference_shape,
                                                        verbose=verbose)

        # If the deformation graph was not provided (None given), then compute
        # the MST
        if None in self.deformation_graph:
            graph_shapes = [i.landmarks[group].lms for i in image_batch]
            deformation_mst = _compute_minimum_spanning_tree(graph_shapes,
                                                             root_vertex=0,
                                                             prefix='- ',
                                                             verbose=verbose)
            self.deformation_graph = [
                deformation_mst if g is None else g
                for g in self.deformation_graph
            ]

        # Build models at each scale
        if verbose:
            print_dynamic('- Building models\n')

        feature_images = []
        # for each scale (low --> high)
        for j in range(self.n_scales):
            if verbose:
                if len(self.scales) > 1:
                    scale_prefix = '  - Scale {}: '.format(j)
                else:
                    scale_prefix = '  - '
            else:
                scale_prefix = None

            # Handle holistic features
            if j == 0 and self.holistic_features[j] == no_op:
                # Saves a lot of memory
                feature_images = image_batch
            elif (j == 0 or self.holistic_features[j]
                  is not self.holistic_features[j - 1]):
                # Compute features only if this is the first pass through
                # the loop or the features at this scale are different from
                # the features at the previous scale
                feature_images = compute_features(image_batch,
                                                  self.holistic_features[j],
                                                  prefix=scale_prefix,
                                                  verbose=verbose)
            # handle scales
            if self.scales[j] != 1:
                # Scale feature images only if scale is different than 1
                scaled_images = scale_images(feature_images,
                                             self.scales[j],
                                             prefix=scale_prefix,
                                             verbose=verbose)
            else:
                scaled_images = feature_images

            # Extract potentially rescaled shapes
            scale_shapes = [i.landmarks[group].lms for i in scaled_images]

            # Apply procrustes to align the shapes
            aligned_shapes = align_shapes(scale_shapes)

            # Build the shape model using the aligned shapes
            if verbose:
                print_dynamic('{}Building shape model'.format(scale_prefix))

            if not increment:
                self.shape_models.append(
                    self._build_shape_model(aligned_shapes,
                                            self.shape_graph[j],
                                            self.max_shape_components[j],
                                            verbose=verbose))
            else:
                self.shape_models[j].increment(aligned_shapes, verbose=verbose)

            # Build the deformation model
            if verbose:
                print_dynamic(
                    '{}Building deformation model'.format(scale_prefix))

            if self.use_procrustes:
                deformation_shapes = aligned_shapes
            else:
                deformation_shapes = scale_shapes

            if not increment:
                self.deformation_models.append(
                    self._build_deformation_model(deformation_shapes,
                                                  self.deformation_graph[j],
                                                  verbose=verbose))
            else:
                self.deformation_models[j].increment(deformation_shapes,
                                                     verbose=verbose)

            # Obtain warped images
            warped_images = self._warp_images(scaled_images, scale_shapes, j,
                                              scale_prefix, verbose)

            # Build the appearance model
            if verbose:
                print_dynamic(
                    '{}Building appearance model'.format(scale_prefix))

            if not increment:
                self.appearance_models.append(
                    self._build_appearance_model(
                        warped_images,
                        self.appearance_graph[j],
                        self.n_appearance_components[j],
                        verbose=verbose))
            else:
                self._increment_appearance_model(warped_images,
                                                 self.appearance_graph[j],
                                                 self.appearance_models[j],
                                                 verbose=verbose)

            if verbose:
                print_dynamic('{}Done\n'.format(scale_prefix))
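When no deformation graph is supplied, the trainer above falls back to a minimum spanning tree over the landmarks. A compact stand-in for the private _compute_minimum_spanning_tree helper using scipy; weighting edges by pairwise distances on the mean shape is an assumption for illustration:

import numpy as np
from scipy.sparse.csgraph import minimum_spanning_tree

def mst_edges(shapes):
    # shapes: (n_shapes, n_points, 2). Connect landmarks by an MST over the
    # pairwise distances between the mean shape's points.
    pts = np.asarray(shapes, dtype=float).mean(axis=0)
    dists = np.linalg.norm(pts[:, None] - pts[None], axis=-1)
    tree = minimum_spanning_tree(dists).tocoo()
    return sorted(zip(tree.row.tolist(), tree.col.tolist()))

shapes = [[[0, 0], [0, 1], [5, 5]],
          [[0, 0], [0, 2], [6, 6]]]
print(mst_edges(shapes))   # [(0, 1), (1, 2)]: a chain through nearby points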
Example #13
    def _train_batch(self, image_batch, increment=False, group=None,
                     verbose=False):
        # Rescale to existing reference shape
        image_batch = rescale_images_to_reference_shape(
            image_batch, group, self.reference_shape, verbose=verbose)

        # If the deformation graph was not provided (None given), then compute
        # the MST
        if None in self.deformation_graph:
            graph_shapes = [i.landmarks[group] for i in image_batch]
            deformation_mst = _compute_minimum_spanning_tree(
                graph_shapes, root_vertex=0, prefix='- ', verbose=verbose)
            self.deformation_graph = [deformation_mst if g is None else g
                                      for g in self.deformation_graph]

        # Build models at each scale
        if verbose:
            print_dynamic('- Building models\n')

        feature_images = []
        # for each scale (low --> high)
        for j in range(self.n_scales):
            if verbose:
                if len(self.scales) > 1:
                    scale_prefix = '  - Scale {}: '.format(j)
                else:
                    scale_prefix = '  - '
            else:
                scale_prefix = None

            # Handle holistic features
            if j == 0 and self.holistic_features[j] == no_op:
                # Saves a lot of memory
                feature_images = image_batch
            elif (j == 0 or self.holistic_features[j] is not
                  self.holistic_features[j - 1]):
                # Compute features only if this is the first pass through
                # the loop or the features at this scale are different from
                # the features at the previous scale
                feature_images = compute_features(image_batch,
                                                  self.holistic_features[j],
                                                  prefix=scale_prefix,
                                                  verbose=verbose)
            # handle scales
            if self.scales[j] != 1:
                # Scale feature images only if scale is different than 1
                scaled_images = scale_images(feature_images, self.scales[j],
                                             prefix=scale_prefix,
                                             verbose=verbose)
            else:
                scaled_images = feature_images

            # Extract potentially rescaled shapes
            scale_shapes = [i.landmarks[group] for i in scaled_images]

            # Apply procrustes to align the shapes
            aligned_shapes = align_shapes(scale_shapes)

            # Build the shape model using the aligned shapes
            if verbose:
                print_dynamic('{}Building shape model'.format(scale_prefix))

            if not increment:
                self.shape_models.append(self._build_shape_model(
                    aligned_shapes, self.shape_graph[j],
                    self.max_shape_components[j], verbose=verbose))
            else:
                self.shape_models[j].increment(aligned_shapes, verbose=verbose)

            # Build the deformation model
            if verbose:
                print_dynamic('{}Building deformation model'.format(
                    scale_prefix))

            if self.use_procrustes:
                deformation_shapes = aligned_shapes
            else:
                deformation_shapes = scale_shapes

            if not increment:
                self.deformation_models.append(self._build_deformation_model(
                    deformation_shapes, self.deformation_graph[j],
                    verbose=verbose))
            else:
                self.deformation_models[j].increment(deformation_shapes,
                                                     verbose=verbose)

            # Obtain warped images
            warped_images = self._warp_images(scaled_images, scale_shapes,
                                              j, scale_prefix, verbose)

            # Build the appearance model
            if verbose:
                print_dynamic('{}Building appearance model'.format(
                    scale_prefix))

            if not increment:
                self.appearance_models.append(self._build_appearance_model(
                    warped_images, self.appearance_graph[j],
                    self.n_appearance_components[j], verbose=verbose))
            else:
                self._increment_appearance_model(
                    warped_images, self.appearance_graph[j],
                    self.appearance_models[j], verbose=verbose)

            if verbose:
                print_dynamic('{}Done\n'.format(scale_prefix))
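align_shapes above performs a generalised Procrustes alignment: each shape is centred, scale-normalised, and rotated onto the running mean. A minimal numpy sketch of that procedure; note the SVD solution below may include a reflection, which a full implementation would forbid by constraining det(R) = +1:

import numpy as np

def procrustes_align(shapes, iterations=2):
    # Centre and scale-normalise each (n_points, 2) shape.
    shapes = [s - s.mean(axis=0) for s in shapes]
    shapes = [s / np.linalg.norm(s) for s in shapes]
    mean = shapes[0]
    for _ in range(iterations):
        aligned = []
        for s in shapes:
            u, _, vt = np.linalg.svd(s.T @ mean)   # orthogonal Procrustes
            aligned.append(s @ (u @ vt))           # rotate s onto the mean
        mean = np.mean(aligned, axis=0)
        mean /= np.linalg.norm(mean)
        shapes = aligned
    return shapes

square = np.array([[0, 0], [0, 1], [1, 1], [1, 0]], dtype=float)
rotated = square @ np.array([[0.0, -1.0], [1.0, 0.0]])  # same square, 90 deg
a, b = procrustes_align([square, rotated])
print(np.allclose(a, b))   # True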
Example #14
    def _train_batch(self, template, shape_batch, increment=False, group=None,
                     shape_forgetting_factor=1.0, verbose=False):
        r"""
        Builds an Active Template Model from a list of landmarked images.
        """
        # build models at each scale
        if verbose:
            print_dynamic('- Building models\n')

        feature_images = []
        # for each scale (low --> high)
        for j in range(self.n_scales):
            if verbose:
                if len(self.scales) > 1:
                    scale_prefix = '  - Scale {}: '.format(j)
                else:
                    scale_prefix = '  - '
            else:
                scale_prefix = None

            # Handle features
            if j == 0 or self.holistic_features[
                    j] is not self.holistic_features[j - 1]:
                # Compute features only if this is the first pass through
                # the loop or the features at this scale are different from
                # the features at the previous scale
                feature_images = compute_features([template],
                                                  self.holistic_features[j],
                                                  prefix=scale_prefix,
                                                  verbose=verbose)
            # handle scales
            if self.scales[j] != 1:
                # Scale feature images only if scale is different than 1
                scaled_images = scale_images(feature_images, self.scales[j],
                                             prefix=scale_prefix,
                                             verbose=verbose)
                # Extract potentially rescaled shapes
                scale_transform = Scale(scale_factor=self.scales[j],
                                        n_dims=2)
                scale_shapes = [scale_transform.apply(s)
                                for s in shape_batch]
            else:
                scaled_images = feature_images
                scale_shapes = shape_batch

            # Build the shape model
            if verbose:
                print_dynamic('{}Building shape model'.format(scale_prefix))

            if not increment:
                if j == 0:
                    shape_model = self._build_shape_model(scale_shapes, j)
                    self.shape_models.append(shape_model)
                else:
                    self.shape_models.append(deepcopy(shape_model))
            else:
                self._increment_shape_model(
                    scale_shapes, self.shape_models[j],
                    forgetting_factor=shape_forgetting_factor)

            # Obtain warped images - we use a scaled version of the
            # reference shape, computed here. This is because the mean
            # moves when we are incrementing, and we need a consistent
            # reference frame.
            scaled_reference_shape = Scale(self.scales[j], n_dims=2).apply(
                self.reference_shape)
            warped_template = self._warp_template(scaled_images[0], group,
                                                  scaled_reference_shape,
                                                  j, scale_prefix, verbose)
            self.warped_templates.append(warped_template[0])

            if verbose:
                print_dynamic('{}Done\n'.format(scale_prefix))

        # Because the shape model is simply copied across the higher scales,
        # trimming must wait until every model has been built. This ensures
        # each scale can keep a different number of components.
        for j, sm in enumerate(self.shape_models):
            max_sc = self.max_shape_components[j]
            if max_sc is not None:
                sm.trim_components(max_sc)
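
Note how the higher-scale models are deep copies of the scale-0 model and are only trimmed once the loop has finished. A toy illustration of that deferred, per-scale trimming follows (TinyModel is hypothetical, standing in for the PCA shape model):

from copy import deepcopy

class TinyModel:
    # Hypothetical PCA-like model whose component count can be trimmed.
    def __init__(self, n_components):
        self.n_components = n_components

    def trim_components(self, n):
        self.n_components = min(self.n_components, n)

base = TinyModel(n_components=20)        # built once at the lowest scale
shape_models = [base, deepcopy(base)]    # higher scales reuse a copy
max_shape_components = [5, None]         # per-scale budgets (None = keep all)
for sm, max_sc in zip(shape_models, max_shape_components):
    if max_sc is not None:
        sm.trim_components(max_sc)       # each copy trims independently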
Example No. 15
    def _train_batch(self, image_batch, increment=False, group=None,
                     verbose=False, shape_forgetting_factor=1.0,
                     appearance_forgetting_factor=1.0):
        r"""
        Builds an Active Appearance Model from a list of landmarked images.

        Parameters
        ----------
        images : list of :map:`MaskedImage`
            The set of landmarked images from which to build the AAM.
        group : `string`, optional
            The key of the landmark set that should be used. If ``None``,
            and if there is only one set of landmarks, this set will be used.
        verbose : `boolean`, optional
            Flag that controls information and progress printing.

        Returns
        -------
        aam : :map:`AAM`
            The AAM object. Shape and appearance models are stored from
            lowest to highest scale
        """
        # Rescale to existing reference shape
        image_batch = rescale_images_to_reference_shape(
            image_batch, group, self.reference_shape,
            verbose=verbose)

        # build models at each scale
        if verbose:
            print_dynamic('- Building models\n')

        feature_images = []
        # for each scale (low --> high)
        for j in range(self.n_scales):
            if verbose:
                if len(self.scales) > 1:
                    scale_prefix = '  - Scale {}: '.format(j)
                else:
                    scale_prefix = '  - '
            else:
                scale_prefix = None

            # Handle holistic features
            if j == 0 and self.holistic_features[j] == no_op:
                # Saves a lot of memory
                feature_images = image_batch
            elif j == 0 or self.holistic_features[
                    j] is not self.holistic_features[j - 1]:
                # Compute features only if this is the first pass through
                # the loop or the features at this scale are different from
                # the features at the previous scale
                feature_images = compute_features(image_batch,
                                                  self.holistic_features[j],
                                                  prefix=scale_prefix,
                                                  verbose=verbose)
            # handle scales
            if self.scales[j] != 1:
                # Scale feature images only if scale is different than 1
                scaled_images = scale_images(feature_images, self.scales[j],
                                             prefix=scale_prefix,
                                             verbose=verbose)
            else:
                scaled_images = feature_images

            # Extract potentially rescaled shapes
            scale_shapes = [i.landmarks[group].lms for i in scaled_images]

            # Build the shape model
            if verbose:
                print_dynamic('{}Building shape model'.format(scale_prefix))

            if not increment:
                if j == 0:
                    shape_model = self._build_shape_model(
                        scale_shapes, j)
                    self.shape_models.append(shape_model)
                else:
                    self.shape_models.append(deepcopy(shape_model))
            else:
                self._increment_shape_model(
                    scale_shapes, self.shape_models[j],
                    forgetting_factor=shape_forgetting_factor)

            # Obtain warped images - we use a scaled version of the
            # reference shape, computed here. This is because the mean
            # moves when we are incrementing, and we need a consistent
            # reference frame.
            scaled_reference_shape = Scale(self.scales[j], n_dims=2).apply(
                self.reference_shape)
            warped_images = self._warp_images(scaled_images, scale_shapes,
                                              scaled_reference_shape,
                                              j, scale_prefix, verbose)

            # obtain appearance model
            if verbose:
                print_dynamic('{}Building appearance model'.format(
                    scale_prefix))

            if not increment:
                appearance_model = PCAModel(warped_images)
                # trim appearance model if required
                if self.max_appearance_components is not None:
                    appearance_model.trim_components(
                        self.max_appearance_components[j])
                # add appearance model to the list
                self.appearance_models.append(appearance_model)
            else:
                # increment appearance model
                self.appearance_models[j].increment(
                    warped_images,
                    forgetting_factor=appearance_forgetting_factor)
                # trim appearance model if required
                if self.max_appearance_components is not None:
                    self.appearance_models[j].trim_components(
                        self.max_appearance_components[j])

            if verbose:
                print_dynamic('{}Done\n'.format(scale_prefix))

        # Because the shape model is simply copied across the higher scales,
        # trimming must wait until every model has been built. This ensures
        # each scale can keep a different number of components.
        for j, sm in enumerate(self.shape_models):
            max_sc = self.max_shape_components[j]
            if max_sc is not None:
                sm.trim_components(max_sc)
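
The appearance model here is a PCA model over the warped (shape-normalised) images, trimmed to max_appearance_components. As a self-contained sketch of what building and trimming such a model amounts to (plain numpy rather than the library's PCAModel; the sizes are made up):

import numpy as np

data = np.random.rand(10, 64)            # one flattened warped image per row
mean = data.mean(axis=0)
U, s, Vt = np.linalg.svd(data - mean, full_matrices=False)
components = Vt[:4]                      # trim: keep the 4 leading modes

# Project a sample onto the trimmed basis and reconstruct it.
weights = (data[0] - mean) @ components.T
reconstruction = mean + weights @ components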
Example No. 16
    def _train(self, original_images, group=None, bounding_box_group_glob=None,
               verbose=False):
        r"""
        """
        # Dlib does not support incremental builds, so the input must be
        # materialised as a list
        if not isinstance(original_images, list):
            original_images = list(original_images)
        # We use temporary landmark groups, so the group key must not be
        # None
        if group is None:
            group = original_images[0].landmarks.group_labels[0]

        # Temporarily store all the bounding boxes for rescaling
        for i in original_images:
            i.landmarks['__gt_bb'] = i.landmarks[group].lms.bounding_box()

        if self.reference_shape is None:
            # If no reference shape was given, use the mean of the first batch
            self.reference_shape = compute_reference_shape(
                [i.landmarks['__gt_bb'].lms for i in original_images],
                self.diagonal, verbose=verbose)

        # Rescale to existing reference shape
        images = rescale_images_to_reference_shape(
            original_images, '__gt_bb', self.reference_shape,
            verbose=verbose)

        # Scaling is done - remove temporary gt bounding boxes
        for i, i2 in zip(original_images, images):
            del i.landmarks['__gt_bb']
            del i2.landmarks['__gt_bb']

        generated_bb_func = generate_perturbations_from_gt(
            images, self.n_perturbations, self._perturb_from_gt_bounding_box,
            gt_group=group, bb_group_glob=bounding_box_group_glob,
            verbose=verbose)

        # for each scale (low --> high)
        current_bounding_boxes = []
        for j in range(self.n_scales):
            if verbose:
                if len(self.scales) > 1:
                    scale_prefix = '  - Scale {}: '.format(j)
                else:
                    scale_prefix = '  - '
            else:
                scale_prefix = None

            # handle scales
            if self.scales[j] != 1:
                # Scale feature images only if scale is different than 1
                scaled_images = scale_images(images, self.scales[j],
                                             prefix=scale_prefix,
                                             verbose=verbose)
            else:
                scaled_images = images

            if j == 0:
                current_bounding_boxes = [generated_bb_func(im)
                                          for im in scaled_images]

            # Extract scaled ground truth shapes for current scale
            scaled_gt_shapes = [i.landmarks[group].lms for i in scaled_images]

            # Train the Dlib model
            current_bounding_boxes = self.algorithms[j].train(
                scaled_images, scaled_gt_shapes, current_bounding_boxes,
                prefix=scale_prefix, verbose=verbose)

            # Scale current bounding boxes to the next resolution; no need
            # to scale after the final level
            if j != (self.n_scales - 1):
                transform = Scale(self.scales[j + 1] / self.scales[j],
                                  n_dims=2)
                for bboxes in current_bounding_boxes:
                    for bb in bboxes:
                        transform.apply_inplace(bb)
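
Between pyramid levels, the fitted boxes are mapped up by the ratio of consecutive scales. A minimal sketch of that rescaling on raw point arrays, standing in for the Scale transform (the scale values are made up):

import numpy as np

scales = [0.5, 1.0]                      # hypothetical pyramid (low -> high)
bbox = np.array([[10.0, 12.0],           # a box fitted at scale 0, stored
                 [30.0, 40.0]])          # as corner points
factor = scales[1] / scales[0]           # 2.0
bbox *= factor                           # uniform scale about the origin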
Example No. 17
    def _train_batch(self, image_batch, increment=False, group=None,
                     bounding_box_group_glob=None, verbose=False):
        # Rescale to existing reference shape
        image_batch = rescale_images_to_reference_shape(
            image_batch, group, self.reference_shape,
            verbose=verbose)

        generated_bb_func = generate_perturbations_from_gt(
            image_batch, self.n_perturbations,
            self._perturb_from_gt_bounding_box, gt_group=group,
            bb_group_glob=bounding_box_group_glob, verbose=verbose)

        # for each scale (low --> high)
        current_shapes = []
        for j in range(self.n_scales):
            if verbose:
                if len(self.scales) > 1:
                    scale_prefix = '  - Scale {}: '.format(j)
                else:
                    scale_prefix = '  - '
            else:
                scale_prefix = None

            # Handle holistic features
            if j == 0 and self.holistic_features[j] == no_op:
                # Saves a lot of memory
                feature_images = image_batch
            elif j == 0 or self.holistic_features[
                    j] is not self.holistic_features[j - 1]:
                # Compute features only if this is the first pass through
                # the loop or the features at this scale are different from
                # the features at the previous scale
                feature_images = compute_features(image_batch,
                                                  self.holistic_features[j],
                                                  prefix=scale_prefix,
                                                  verbose=verbose)
            # handle scales
            if self.scales[j] != 1:
                # Scale feature images only if scale is different than 1
                scaled_images = scale_images(feature_images, self.scales[j],
                                             prefix=scale_prefix,
                                             verbose=verbose)
            else:
                scaled_images = feature_images

            # Extract scaled ground truth shapes for current scale
            scaled_shapes = [i.landmarks[group].lms for i in scaled_images]

            if j == 0:
                msg = '{}Aligning reference shape with bounding boxes.'.format(
                    scale_prefix)
                wrap = partial(print_progress, prefix=msg,
                               end_with_newline=False, verbose=verbose)

                # Extract perturbations at the very bottom level
                for ii in wrap(scaled_images):
                    c_shapes = []
                    for bbox in generated_bb_func(ii):
                        c_s = align_shape_with_bounding_box(
                            self.reference_shape, bbox)
                        c_shapes.append(c_s)
                    current_shapes.append(c_shapes)

            # train supervised descent algorithm
            if not increment:
                current_shapes = self.algorithms[j].train(
                    scaled_images, scaled_shapes, current_shapes,
                    prefix=scale_prefix, verbose=verbose)
            else:
                current_shapes = self.algorithms[j].increment(
                    scaled_images, scaled_shapes, current_shapes,
                    prefix=scale_prefix, verbose=verbose)

            # Scale current shapes to the next resolution; no need to
            # scale after the final level
            if j != (self.n_scales - 1):
                transform = Scale(self.scales[j + 1] / self.scales[j],
                                  n_dims=2)
                for image_shapes in current_shapes:
                    for shape in image_shapes:
                        transform.apply_inplace(shape)
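
At the bottom level, each perturbed bounding box is turned into an initial shape by translating and rescaling the reference shape into the box. The following is a rough numpy stand-in for align_shape_with_bounding_box (the function name and exact transform here are assumptions, not the library's code):

import numpy as np

def align_to_box(shape, bb_min, bb_max):
    # Uniformly scale and translate `shape` so its bounding box roughly
    # matches the target box.
    s_min, s_max = shape.min(axis=0), shape.max(axis=0)
    scale = ((bb_max - bb_min) / (s_max - s_min)).mean()
    centred = (shape - (s_min + s_max) / 2.0) * scale
    return centred + (bb_min + bb_max) / 2.0

reference = np.array([[0.0, 0.0], [0.0, 10.0], [10.0, 5.0]])
init = align_to_box(reference,
                    np.array([50.0, 50.0]), np.array([90.0, 90.0]))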
Example No. 18
    def _train_batch(self,
                     image_batch,
                     increment=False,
                     group=None,
                     shape_forgetting_factor=1.0,
                     verbose=False):
        # normalize images
        image_batch = rescale_images_to_reference_shape(image_batch,
                                                        group,
                                                        self.reference_shape,
                                                        verbose=verbose)

        # build models at each scale
        if verbose:
            print_dynamic('- Training models\n')

        # for each level (low --> high)
        for i in range(self.n_scales):
            if verbose:
                if self.n_scales > 1:
                    prefix = '  - Scale {}: '.format(i)
                else:
                    prefix = '  - '
            else:
                prefix = None

            # Handle holistic features
            if i == 0 and self.holistic_features[i] == no_op:
                # Saves a lot of memory
                feature_images = image_batch
            elif i == 0 or self.holistic_features[
                    i] is not self.holistic_features[i - 1]:
                # compute features only if this is the first pass through
                # the loop or the features at this scale are different from
                # the features at the previous scale
                feature_images = compute_features(image_batch,
                                                  self.holistic_features[i],
                                                  prefix=prefix,
                                                  verbose=verbose)
            # handle scales
            if self.scales[i] != 1:
                # scale feature images only if scale is different than 1
                scaled_images = scale_images(feature_images,
                                             self.scales[i],
                                             prefix=prefix,
                                             verbose=verbose)
            else:
                scaled_images = feature_images

            # extract scaled shapes
            scaled_shapes = [image.landmarks[group] for image in scaled_images]

            # train shape model
            if verbose:
                print_dynamic('{}Training shape model'.format(prefix))

            if not increment:
                shape_model = self._build_shape_model(scaled_shapes, i)
                self.shape_models.append(shape_model)
            else:
                self._increment_shape_model(
                    scaled_shapes,
                    i,
                    forgetting_factor=shape_forgetting_factor)

            # train expert ensemble
            if verbose:
                print_dynamic('{}Training expert ensemble'.format(prefix))

            if increment:
                self.expert_ensembles[i].increment(scaled_images,
                                                   scaled_shapes,
                                                   prefix=prefix,
                                                   verbose=verbose)
            else:
                expert_ensemble = self.expert_ensemble_cls[i](
                    images=scaled_images,
                    shapes=scaled_shapes,
                    patch_shape=self.patch_shape[i],
                    patch_normalisation=self.patch_normalisation,
                    cosine_mask=self.cosine_mask,
                    context_shape=self.context_shape[i],
                    sample_offsets=self.sample_offsets,
                    prefix=prefix,
                    verbose=verbose)
                self.expert_ensembles.append(expert_ensemble)

            if verbose:
                print_dynamic('{}Done\n'.format(prefix))
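
The expert ensembles trained above are patch-based: each expert only ever sees a small window around its landmark. A bare-bones sketch of the patch extraction step (borders are ignored for brevity; the real ensembles also apply patch_normalisation and an optional cosine mask):

import numpy as np

def extract_patches(image, centres, patch_shape):
    # Crop one (h, w) window around each landmark centre.
    h, w = patch_shape
    return [image[int(r) - h // 2:int(r) - h // 2 + h,
                  int(c) - w // 2:int(c) - w // 2 + w]
            for r, c in centres]

image = np.random.rand(100, 100)
landmarks = np.array([[30.5, 40.2], [60.0, 55.7]])
patches = extract_patches(image, landmarks, (17, 17))  # two 17x17 patches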
Example No. 19
    def _train_batch(self, template, shape_batch, increment=False, group=None,
                     shape_forgetting_factor=1.0, verbose=False):
        # build models at each scale
        if verbose:
            print_dynamic('- Building models\n')

        feature_images = []
        # for each scale (low --> high)
        for j in range(self.n_scales):
            if verbose:
                if len(self.scales) > 1:
                    scale_prefix = '  - Scale {}: '.format(j)
                else:
                    scale_prefix = '  - '
            else:
                scale_prefix = None

            # Handle features
            if j == 0 or self.holistic_features[
                    j] is not self.holistic_features[j - 1]:
                # Compute features only if this is the first pass through
                # the loop or the features at this scale are different from
                # the features at the previous scale
                feature_images = compute_features([template],
                                                  self.holistic_features[j],
                                                  prefix=scale_prefix,
                                                  verbose=verbose)
            # handle scales
            if self.scales[j] != 1:
                # Scale feature images only if scale is different than 1
                scaled_images = scale_images(feature_images, self.scales[j],
                                             prefix=scale_prefix,
                                             verbose=verbose)
                # Extract potentially rescaled shapes
                scale_transform = Scale(scale_factor=self.scales[j],
                                        n_dims=2)
                scale_shapes = [scale_transform.apply(s)
                                for s in shape_batch]
            else:
                scaled_images = feature_images
                scale_shapes = shape_batch

            # Build the shape model
            if verbose:
                print_dynamic('{}Building shape model'.format(scale_prefix))

            if not increment:
                shape_model = self._build_shape_model(scale_shapes, j)
                self.shape_models.append(shape_model)
            else:
                self._increment_shape_model(
                    scale_shapes, j, forgetting_factor=shape_forgetting_factor)

            # Obtain warped images - we use a scaled version of the
            # reference shape, computed here. This is because the mean
            # moves when we are incrementing, and we need a consistent
            # reference frame.
            scaled_reference_shape = Scale(self.scales[j], n_dims=2).apply(
                self.reference_shape)
            warped_template = self._warp_template(scaled_images[0], group,
                                                  scaled_reference_shape,
                                                  j, scale_prefix, verbose)
            self.warped_templates.append(warped_template[0])

            if verbose:
                print_dynamic('{}Done\n'.format(scale_prefix))
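
The incremental branch above takes a shape_forgetting_factor, which down-weights the statistics accumulated from earlier batches. A toy illustration of the idea on a running mean (not the library's actual update rule):

import numpy as np

def increment_mean(old_mean, old_n, new_samples, forgetting_factor=1.0):
    # Values below 1.0 shrink the effective count of past samples,
    # letting the new batch dominate.
    n_eff = forgetting_factor * old_n
    total = n_eff + len(new_samples)
    return (n_eff * old_mean + new_samples.sum(axis=0)) / total

m = increment_mean(np.array([0.0]), old_n=100,
                   new_samples=np.full((10, 1), 10.0),
                   forgetting_factor=0.5)   # past data counts half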