Example #1
    def _build_shape_model(self, shapes, scale_index):
        mean_aligned_shape = mean_pointcloud(align_shapes(shapes))
        self.n_landmarks = mean_aligned_shape.n_points
        self.reference_frame = build_reference_frame(mean_aligned_shape)
        dense_shapes = densify_shapes(shapes, self.reference_frame,
                                      self.transform)
        # build dense shape model
        shape_model = build_shape_model(dense_shapes)
        return shape_model
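Example 1 leans entirely on menpo/menpofit helpers (align_shapes, mean_pointcloud, build_reference_frame, densify_shapes, build_shape_model). As a rough, self-contained sketch of the core idea only, namely that a shape model is PCA over flattened point coordinates, the NumPy snippet below builds a mean shape and a linear basis from a list of (n_points, 2) arrays. It deliberately omits the Procrustes alignment and dense reference-frame sampling that menpofit performs, and every name in it is illustrative rather than part of the menpo API.

import numpy as np

def pca_shape_model(shapes, n_components=None):
    # shapes: list of (n_points, 2) arrays; toy PCA shape model, no alignment
    X = np.stack([s.reshape(-1) for s in shapes])      # (n_shapes, 2 * n_points)
    mean = X.mean(axis=0)
    _, _, Vt = np.linalg.svd(X - mean, full_matrices=False)
    components = Vt if n_components is None else Vt[:n_components]
    return mean, components

# usage with toy data
rng = np.random.default_rng(0)
toy_shapes = [rng.normal(size=(5, 2)) for _ in range(8)]
mean_shape, basis = pca_shape_model(toy_shapes, n_components=3)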
Example #2
    def build(self, images, group=None, label=None, verbose=False):
        r"""
        Builds a Multilevel Constrained Local Model from a list of
        landmarked images.

        Parameters
        ----------
        images : list of :map:`Image`
            The set of landmarked images from which to build the CLM.
        group : `string`, optional
            The key of the landmark set that should be used. If ``None``,
            and if there is only one set of landmarks, this set will be used.
        label : `string`, optional
            The label of the landmark manager that you wish to use. If
            ``None``, the convex hull of all landmarks is used.
        verbose : `boolean`, optional
            Flag that controls information and progress printing.

        Returns
        -------
        clm : :map:`CLM`
            The CLM object
        """
        # compute reference_shape and normalize images size
        self.reference_shape, normalized_images = \
            normalization_wrt_reference_shape(
                images, group, label, self.normalization_diagonal,
                verbose=verbose)

        # create pyramid
        generators = create_pyramid(normalized_images, self.n_levels,
                                    self.downscale, self.features,
                                    verbose=verbose)

        # build the model at each pyramid level
        if verbose:
            if self.n_levels > 1:
                print_dynamic('- Building model for each of the {} pyramid '
                              'levels\n'.format(self.n_levels))
            else:
                print_dynamic('- Building model\n')

        shape_models = []
        classifiers = []
        # for each pyramid level (high --> low)
        for j in range(self.n_levels):
            # since models are built from highest to lowest level, the
            # list-valued parameters need to be accessed with a reversed index
            rj = self.n_levels - j - 1

            if verbose:
                level_str = '  - '
                if self.n_levels > 1:
                    level_str = '  - Level {}: '.format(j + 1)

            # get images of current level
            feature_images = []
            for c, g in enumerate(generators):
                if verbose:
                    print_dynamic(
                        '{}Computing feature space/rescaling - {}'.format(
                            level_str,
                            progress_bar_str((c + 1.) / len(generators),
                                             show_bar=False)))
                feature_images.append(next(g))

            # extract potentially rescaled shapes
            shapes = [i.landmarks[group][label] for i in feature_images]

            # define shapes that will be used for training
            if j == 0:
                original_shapes = shapes
                train_shapes = shapes
            else:
                if self.scaled_shape_models:
                    train_shapes = shapes
                else:
                    train_shapes = original_shapes

            # train shape model and find reference frame
            if verbose:
                print_dynamic('{}Building shape model'.format(level_str))
            shape_model = build_shape_model(
                train_shapes, self.max_shape_components[rj])

            # add shape model to the list
            shape_models.append(shape_model)

            # build classifiers
            sampling_grid = build_sampling_grid(self.patch_shape)
            n_points = shapes[0].n_points
            level_classifiers = []
            for k in range(n_points):
                if verbose:
                    print_dynamic('{}Building classifiers - {}'.format(
                        level_str,
                        progress_bar_str((k + 1.) / n_points,
                                         show_bar=False)))

                positive_labels = []
                negative_labels = []
                positive_samples = []
                negative_samples = []

                for i, s in zip(feature_images, shapes):

                    max_x = i.shape[0] - 1
                    max_y = i.shape[1] - 1

                    point = (np.round(s.points[k, :])).astype(int)
                    patch_grid = sampling_grid + point[None, None, ...]
                    positive, negative = get_pos_neg_grid_positions(
                        patch_grid, positive_grid_size=(1, 1))

                    x = positive[:, 0]
                    y = positive[:, 1]
                    x[x > max_x] = max_x
                    y[y > max_y] = max_y
                    x[x < 0] = 0
                    y[y < 0] = 0

                    positive_sample = i.pixels[positive[:, 0],
                                               positive[:, 1], :]
                    positive_samples.append(positive_sample)
                    positive_labels.append(np.ones(positive_sample.shape[0]))

                    x = negative[:, 0]
                    y = negative[:, 1]
                    x[x > max_x] = max_x
                    y[y > max_y] = max_y
                    x[x < 0] = 0
                    y[y < 0] = 0

                    negative_sample = i.pixels[x, y, :]
                    negative_samples.append(negative_sample)
                    negative_labels.append(-np.ones(negative_sample.shape[0]))

                positive_samples = np.asanyarray(positive_samples)
                positive_samples = np.reshape(positive_samples,
                                              (-1, positive_samples.shape[-1]))
                positive_labels = np.asanyarray(positive_labels).flatten()

                negative_samples = np.asanyarray(negative_samples)
                negative_samples = np.reshape(negative_samples,
                                              (-1, negative_samples.shape[-1]))
                negative_labels = np.asanyarray(negative_labels).flatten()

                X = np.vstack((positive_samples, negative_samples))
                t = np.hstack((positive_labels, negative_labels))

                clf = self.classifier_trainers[rj](X, t)
                level_classifiers.append(clf)

            # add level classifiers to the list
            classifiers.append(level_classifiers)

            if verbose:
                print_dynamic('{}Done\n'.format(level_str))

        # reverse the lists of shape models and classifiers so that they are
        # ordered from lower to higher resolution
        shape_models.reverse()
        classifiers.reverse()
        n_training_images = len(images)

        from .base import CLM
        return CLM(shape_models, classifiers, n_training_images,
                   self.patch_shape, self.features, self.reference_shape,
                   self.downscale, self.scaled_shape_models)
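The classifier loop in Example 2 depends on menpofit internals (build_sampling_grid, get_pos_neg_grid_positions). The NumPy-only sketch below reproduces the gist under stated assumptions: a window of integer offsets around a rounded landmark, positions clamped into the image bounds, the centre cell labelled positive and the remaining cells negative. sampling_grid, the (8, 8) patch shape and the toy image size are illustrative assumptions, not the menpofit implementations.

import numpy as np

def sampling_grid(patch_shape):
    # hypothetical stand-in for build_sampling_grid: (h, w, 2) integer offsets
    h, w = patch_shape
    ys, xs = np.meshgrid(np.arange(h) - h // 2,
                         np.arange(w) - w // 2, indexing='ij')
    return np.stack([ys, xs], axis=-1)

grid = sampling_grid((8, 8))
point = np.array([120, 85])                 # a rounded landmark location
patch_grid = grid + point                   # absolute pixel positions

image_shape = (200, 150)
coords = patch_grid.reshape(-1, 2)
# clamp into bounds, mirroring the x/y clipping in the example above
coords = np.clip(coords, [0, 0], [image_shape[0] - 1, image_shape[1] - 1])

# centre cell positive, everything else negative (positive_grid_size=(1, 1))
is_centre = np.all(coords == point, axis=1)
labels = np.where(is_centre, 1.0, -1.0)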
Example #3
    def _build_shape_model(self, shapes, scale_index):
        return build_shape_model(shapes)
Example #4
    def build(self, shapes, template, group=None, label=None, verbose=False):
        r"""
        Builds a Multilevel Active Template Model given a list of shapes and a
        template image.

        Parameters
        ----------
        shapes : list of :map:`PointCloud`
            The set of shapes from which to build the shape model of the ATM.

        template : :map:`Image` or subclass
            The image to be used as template.

        group : `string`, optional
            The key of the landmark set of the template that should be used. If
            ``None``, and if there is only one set of landmarks, this set will
            be used.

        label : `string`, optional
            The label of the landmark manager of the template that you wish to
            use. If ``None`` is passed, the convex hull of all landmarks is
            used.

        verbose : `boolean`, optional
            Flag that controls information and progress printing.

        Returns
        -------
        atm : :map:`ATM`
            The ATM object. Shape models and warped templates are stored from
            lowest to highest level.
        """
        # compute reference_shape
        self.reference_shape = compute_reference_shape(
            shapes, self.normalization_diagonal, verbose=verbose)

        # normalize the template size using the reference_shape scaling
        if verbose:
            print_dynamic('- Normalizing template size')
        normalized_template = template.rescale_to_reference_shape(
            self.reference_shape, group=group, label=label)

        # create pyramid for template image
        if verbose:
            print_dynamic('- Creating template pyramid')
        generator = create_pyramid([normalized_template], self.n_levels,
                                   self.downscale, self.features)

        # build the model at each pyramid level
        if verbose:
            if self.n_levels > 1:
                print_dynamic('- Building model for each of the {} pyramid '
                              'levels\n'.format(self.n_levels))
            else:
                print_dynamic('- Building model\n')

        shape_models = []
        warped_templates = []
        # for each pyramid level (high --> low)
        for j in range(self.n_levels):
            # since models are built from highest to lowest level, the
            # list-valued parameters need to be accessed with a reversed index
            rj = self.n_levels - j - 1

            if verbose:
                level_str = '  - '
                if self.n_levels > 1:
                    level_str = '  - Level {}: '.format(j + 1)

            # rescale shapes if required
            if j > 0 and self.scaled_shape_models:
                scale_transform = Scale(scale_factor=1.0 / self.downscale,
                                        n_dims=2)
                shapes = [scale_transform.apply(s) for s in shapes]

            # train shape model and find reference frame
            if verbose:
                print_dynamic('{}Building shape model'.format(level_str))
            shape_model = build_shape_model(shapes,
                                            self.max_shape_components[rj])
            reference_frame = self._build_reference_frame(shape_model.mean())

            # add shape model to the list
            shape_models.append(shape_model)

            # get template's feature image of current level
            if verbose:
                print_dynamic('{}Warping template'.format(level_str))
            feature_template = next(generator[0])

            # compute transform
            transform = self.transform(reference_frame.landmarks['source'].lms,
                                       feature_template.landmarks[group][label])

            # warp template to reference frame
            warped_templates.append(
                feature_template.warp_to_mask(reference_frame.mask, transform))

            # attach reference_frame to template's source shape
            warped_templates[j].landmarks['source'] = \
                reference_frame.landmarks['source']

            if verbose:
                print_dynamic('{}Done\n'.format(level_str))

        # reverse the lists of shape models and warped templates so that they
        # are ordered from lower to higher resolution
        shape_models.reverse()
        warped_templates.reverse()
        n_training_shapes = len(shapes)

        return self._build_atm(shape_models, warped_templates,
                               n_training_shapes)
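In Example 4 the call self.transform(reference_frame.landmarks['source'].lms, feature_template.landmarks[group][label]) builds the warp that takes the template into the reference frame (typically a piecewise affine or thin plate spline warp in menpofit). As a simplified, hypothetical stand-in for that step, the sketch below fits a single 2D affine transform between two landmark sets by least squares; fit_affine and the toy point sets are illustrative assumptions only.

import numpy as np

def fit_affine(source, target):
    # least-squares 2D affine transform mapping source points onto target
    n = source.shape[0]
    A = np.hstack([source, np.ones((n, 1))])        # (n, 3) homogeneous coords
    M, *_ = np.linalg.lstsq(A, target, rcond=None)  # (3, 2) affine matrix
    return M

src = np.array([[0.0, 0.0], [1.0, 0.0], [0.0, 1.0], [1.0, 1.0]])
dst = src * 2.0 + np.array([5.0, -3.0])
M = fit_affine(src, dst)
mapped = np.hstack([src, np.ones((4, 1))]) @ M      # approximately equals dst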
Example #5
    def build(self, images, group=None, label=None, verbose=False):
        r"""
        Builds a Multilevel Active Appearance Model from a list of
        landmarked images.

        Parameters
        ----------
        images : list of :map:`MaskedImage`
            The set of landmarked images from which to build the AAM.

        group : `string`, optional
            The key of the landmark set that should be used. If ``None``,
            and if there is only one set of landmarks, this set will be used.

        label : `string`, optional
            The label of the landmark manager that you wish to use. If no
            label is passed, the convex hull of all landmarks is used.

        verbose : `boolean`, optional
            Flag that controls information and progress printing.

        Returns
        -------
        aam : :map:`AAM`
            The AAM object. Shape and appearance models are stored from lowest
            to highest level.
        """
        # compute reference_shape and normalize images size
        self.reference_shape, normalized_images = \
            normalization_wrt_reference_shape(images, group, label,
                                              self.normalization_diagonal,
                                              verbose=verbose)

        # create pyramid
        generators = create_pyramid(normalized_images, self.n_levels,
                                    self.downscale, self.features,
                                    verbose=verbose)

        # build the model at each pyramid level
        if verbose:
            if self.n_levels > 1:
                print_dynamic('- Building model for each of the {} pyramid '
                              'levels\n'.format(self.n_levels))
            else:
                print_dynamic('- Building model\n')

        shape_models = []
        appearance_models = []
        # for each pyramid level (high --> low)
        for j in range(self.n_levels):
            # since models are built from highest to lowest level, the
            # list-valued parameters need to be accessed with a reversed index
            rj = self.n_levels - j - 1

            if verbose:
                level_str = '  - '
                if self.n_levels > 1:
                    level_str = '  - Level {}: '.format(j + 1)

            # get feature images of current level
            feature_images = []
            for c, g in enumerate(generators):
                if verbose:
                    print_dynamic(
                        '{}Computing feature space/rescaling - {}'.format(
                            level_str,
                            progress_bar_str((c + 1.) / len(generators),
                                             show_bar=False)))
                feature_images.append(next(g))

            # extract potentially rescaled shapes
            shapes = [i.landmarks[group][label] for i in feature_images]

            # define shapes that will be used for training
            if j == 0:
                original_shapes = shapes
                train_shapes = shapes
            else:
                if self.scaled_shape_models:
                    train_shapes = shapes
                else:
                    train_shapes = original_shapes

            # train shape model and find reference frame
            if verbose:
                print_dynamic('{}Building shape model'.format(level_str))
            shape_model = build_shape_model(
                train_shapes, self.max_shape_components[rj])
            reference_frame = self._build_reference_frame(shape_model.mean())

            # add shape model to the list
            shape_models.append(shape_model)

            # compute transforms
            if verbose:
                print_dynamic('{}Computing transforms'.format(level_str))

            # Create a dummy initial transform
            s_to_t_transform = self.transform(
                reference_frame.landmarks['source'].lms,
                reference_frame.landmarks['source'].lms)

            # warp images to reference frame
            warped_images = []
            for c, i in enumerate(feature_images):
                if verbose:
                    print_dynamic('{}Warping images - {}'.format(
                        level_str,
                        progress_bar_str(float(c + 1) / len(feature_images),
                                         show_bar=False)))
                # Setting the target can be significantly faster for transforms
                # such as CachedPiecewiseAffine
                s_to_t_transform.set_target(i.landmarks[group][label])
                warped_images.append(i.warp_to_mask(reference_frame.mask,
                                                    s_to_t_transform))

            # attach reference_frame to images' source shape
            for i in warped_images:
                i.landmarks['source'] = reference_frame.landmarks['source']

            # build appearance model
            if verbose:
                print_dynamic('{}Building appearance model'.format(level_str))
            appearance_model = PCAModel(warped_images)
            # trim appearance model if required
            if self.max_appearance_components[rj] is not None:
                appearance_model.trim_components(
                    self.max_appearance_components[rj])

            # add appearance model to the list
            appearance_models.append(appearance_model)

            if verbose:
                print_dynamic('{}Done\n'.format(level_str))

        # reverse the list of shape and appearance models so that they are
        # ordered from lower to higher resolution
        shape_models.reverse()
        appearance_models.reverse()
        n_training_images = len(images)

        return self._build_aam(shape_models, appearance_models,
                               n_training_images)
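The appearance model in Example 5 is built by PCAModel(warped_images) and optionally trimmed with trim_components. The NumPy sketch below shows roughly what that amounts to: PCA on vectorised warped textures, keeping only the leading max_appearance_components directions, then projecting and reconstructing a texture. The 16x16 toy textures and all names are assumptions for illustration, not the menpo API.

import numpy as np

rng = np.random.default_rng(1)
warped = [rng.normal(size=(16, 16)) for _ in range(10)]   # toy warped textures

X = np.stack([w.reshape(-1) for w in warped])             # (10, 256)
mean = X.mean(axis=0)
_, _, Vt = np.linalg.svd(X - mean, full_matrices=False)

max_appearance_components = 4                             # the "trimming" step
C = Vt[:max_appearance_components]

# project a texture onto the trimmed basis and reconstruct it
x = warped[0].reshape(-1) - mean
weights = C @ x
reconstruction = (mean + C.T @ weights).reshape(16, 16)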
Example #6
    def build(self, images, group=None, label=None, verbose=False):
        r"""
        Builds a Multilevel Active Appearance Model from a list of
        landmarked images.

        Parameters
        ----------
        images : list of :map:`MaskedImage`
            The set of landmarked images from which to build the AAM.

        group : `string`, optional
            The key of the landmark set that should be used. If ``None``,
            and if there is only one set of landmarks, this set will be used.

        label : `string`, optional
            The label of the landmark manager that you wish to use. If no
            label is passed, the convex hull of all landmarks is used.

        verbose : `boolean`, optional
            Flag that controls information and progress printing.

        Returns
        -------
        aam : :map:`AAM`
            The AAM object. Shape and appearance models are stored from lowest
            to highest level.
        """
        # compute reference_shape and normalize images size
        self.reference_shape, normalized_images = \
            normalization_wrt_reference_shape(images, group, label,
                                              self.normalization_diagonal,
                                              verbose=verbose)

        # create pyramid
        generators = create_pyramid(normalized_images,
                                    self.n_levels,
                                    self.downscale,
                                    self.features,
                                    verbose=verbose)

        # build the model at each pyramid level
        if verbose:
            if self.n_levels > 1:
                print_dynamic('- Building model for each of the {} pyramid '
                              'levels\n'.format(self.n_levels))
            else:
                print_dynamic('- Building model\n')

        shape_models = []
        appearance_models = []
        # for each pyramid level (high --> low)
        for j in range(self.n_levels):
            # since models are built from highest to lowest level, the
            # list-valued parameters need to be accessed with a reversed index
            rj = self.n_levels - j - 1

            if verbose:
                level_str = '  - '
                if self.n_levels > 1:
                    level_str = '  - Level {}: '.format(j + 1)

            # get feature images of current level
            feature_images = []
            for c, g in enumerate(generators):
                if verbose:
                    print_dynamic(
                        '{}Computing feature space/rescaling - {}'.format(
                            level_str,
                            progress_bar_str((c + 1.) / len(generators),
                                             show_bar=False)))
                feature_images.append(next(g))

            # extract potentially rescaled shapes
            shapes = [i.landmarks[group][label] for i in feature_images]

            # define shapes that will be used for training
            if j == 0:
                original_shapes = shapes
                train_shapes = shapes
            else:
                if self.scaled_shape_models:
                    train_shapes = shapes
                else:
                    train_shapes = original_shapes

            # train shape model and find reference frame
            if verbose:
                print_dynamic('{}Building shape model'.format(level_str))
            shape_model = build_shape_model(train_shapes,
                                            self.max_shape_components[rj])
            reference_frame = self._build_reference_frame(shape_model.mean())

            # add shape model to the list
            shape_models.append(shape_model)

            # compute transforms
            if verbose:
                print_dynamic('{}Computing transforms'.format(level_str))
            transforms = [
                self.transform(reference_frame.landmarks['source'].lms,
                               i.landmarks[group][label])
                for i in feature_images
            ]

            # warp images to reference frame
            warped_images = []
            for c, (i, t) in enumerate(zip(feature_images, transforms)):
                if verbose:
                    print_dynamic('{}Warping images - {}'.format(
                        level_str,
                        progress_bar_str(float(c + 1) / len(feature_images),
                                         show_bar=False)))
                warped_images.append(i.warp_to_mask(reference_frame.mask, t))

            # attach reference_frame to images' source shape
            for i in warped_images:
                i.landmarks['source'] = reference_frame.landmarks['source']

            # build appearance model
            if verbose:
                print_dynamic('{}Building appearance model'.format(level_str))
            appearance_model = PCAModel(warped_images)
            # trim appearance model if required
            if self.max_appearance_components[rj] is not None:
                appearance_model.trim_components(
                    self.max_appearance_components[rj])

            # add appearance model to the list
            appearance_models.append(appearance_model)

            if verbose:
                print_dynamic('{}Done\n'.format(level_str))

        # reverse the list of shape and appearance models so that they are
        # ordered from lower to higher resolution
        shape_models.reverse()
        appearance_models.reverse()
        n_training_images = len(images)

        return self._build_aam(shape_models, appearance_models,
                               n_training_images)
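A brief note on the level ordering used throughout Examples 2 and 4 to 7: levels are processed from highest to lowest resolution, while list-valued parameters such as max_shape_components are stored from lowest to highest, hence the reversed index rj and the final reverse() calls. A minimal sketch of the pattern (names are illustrative):

n_levels = 3
max_shape_components = [5, 10, 20]          # ordered low -> high resolution
built = []
for j in range(n_levels):                   # j walks high -> low resolution
    rj = n_levels - j - 1                   # index into low -> high lists
    built.append(('level', j, 'n_components', max_shape_components[rj]))
built.reverse()                             # index 0 is now lowest resolution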
Example #7
    def build(self, images, group=None, label=None, verbose=False):
        r"""
        Builds a Multilevel Constrained Local Model from a list of
        landmarked images.

        Parameters
        ----------
        images : list of :map:`Image`
            The set of landmarked images from which to build the CLM.
        group : `string`, optional
            The key of the landmark set that should be used. If ``None``,
            and if there is only one set of landmarks, this set will be used.
        label : `string`, optional
            The label of the landmark manager that you wish to use. If
            ``None``, the convex hull of all landmarks is used.
        verbose : `boolean`, optional
            Flag that controls information and progress printing.

        Returns
        -------
        clm : :map:`CLM`
            The CLM object
        """
        # compute reference_shape and normalize images size
        self.reference_shape, normalized_images = \
            normalization_wrt_reference_shape(
                images, group, label, self.normalization_diagonal,
                verbose=verbose)

        # create pyramid
        generators = create_pyramid(normalized_images,
                                    self.n_levels,
                                    self.downscale,
                                    self.features,
                                    verbose=verbose)

        # build the model at each pyramid level
        if verbose:
            if self.n_levels > 1:
                print_dynamic('- Building model for each of the {} pyramid '
                              'levels\n'.format(self.n_levels))
            else:
                print_dynamic('- Building model\n')

        shape_models = []
        classifiers = []
        # for each pyramid level (high --> low)
        for j in range(self.n_levels):
            # since models are built from highest to lowest level, the
            # list-valued parameters need to be accessed with a reversed index
            rj = self.n_levels - j - 1

            if verbose:
                level_str = '  - '
                if self.n_levels > 1:
                    level_str = '  - Level {}: '.format(j + 1)

            # get images of current level
            feature_images = []
            for c, g in enumerate(generators):
                if verbose:
                    print_dynamic(
                        '{}Computing feature space/rescaling - {}'.format(
                            level_str,
                            progress_bar_str((c + 1.) / len(generators),
                                             show_bar=False)))
                feature_images.append(next(g))

            # extract potentially rescaled shapes
            shapes = [i.landmarks[group][label] for i in feature_images]

            # define shapes that will be used for training
            if j == 0:
                original_shapes = shapes
                train_shapes = shapes
            else:
                if self.scaled_shape_models:
                    train_shapes = shapes
                else:
                    train_shapes = original_shapes

            # train shape model and find reference frame
            if verbose:
                print_dynamic('{}Building shape model'.format(level_str))
            shape_model = build_shape_model(train_shapes,
                                            self.max_shape_components[rj])

            # add shape model to the list
            shape_models.append(shape_model)

            # build classifiers
            sampling_grid = build_sampling_grid(self.patch_shape)
            n_points = shapes[0].n_points
            level_classifiers = []
            for k in range(n_points):
                if verbose:
                    print_dynamic('{}Building classifiers - {}'.format(
                        level_str,
                        progress_bar_str((k + 1.) / n_points, show_bar=False)))

                positive_labels = []
                negative_labels = []
                positive_samples = []
                negative_samples = []

                for i, s in zip(feature_images, shapes):

                    max_x = i.shape[0] - 1
                    max_y = i.shape[1] - 1

                    point = (np.round(s.points[k, :])).astype(int)
                    patch_grid = sampling_grid + point[None, None, ...]
                    positive, negative = get_pos_neg_grid_positions(
                        patch_grid, positive_grid_size=(1, 1))

                    x = positive[:, 0]
                    y = positive[:, 1]
                    x[x > max_x] = max_x
                    y[y > max_y] = max_y
                    x[x < 0] = 0
                    y[y < 0] = 0

                    positive_sample = i.pixels[positive[:, 0],
                                               positive[:, 1], :]
                    positive_samples.append(positive_sample)
                    positive_labels.append(np.ones(positive_sample.shape[0]))

                    x = negative[:, 0]
                    y = negative[:, 1]
                    x[x > max_x] = max_x
                    y[y > max_y] = max_y
                    x[x < 0] = 0
                    y[y < 0] = 0

                    negative_sample = i.pixels[x, y, :]
                    negative_samples.append(negative_sample)
                    negative_labels.append(-np.ones(negative_sample.shape[0]))

                positive_samples = np.asanyarray(positive_samples)
                positive_samples = np.reshape(positive_samples,
                                              (-1, positive_samples.shape[-1]))
                positive_labels = np.asanyarray(positive_labels).flatten()

                negative_samples = np.asanyarray(negative_samples)
                negative_samples = np.reshape(negative_samples,
                                              (-1, negative_samples.shape[-1]))
                negative_labels = np.asanyarray(negative_labels).flatten()

                X = np.vstack((positive_samples, negative_samples))
                t = np.hstack((positive_labels, negative_labels))

                clf = self.classifier_trainers[rj](X, t)
                level_classifiers.append(clf)

            # add level classifiers to the list
            classifiers.append(level_classifiers)

            if verbose:
                print_dynamic('{}Done\n'.format(level_str))

        # reverse the lists of shape models and classifiers so that they are
        # ordered from lower to higher resolution
        shape_models.reverse()
        classifiers.reverse()
        n_training_images = len(images)

        from .base import CLM
        return CLM(shape_models, classifiers, n_training_images,
                   self.patch_shape, self.features, self.reference_shape,
                   self.downscale, self.scaled_shape_models)
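The coordinate clamping inside the classifier loop works because x = positive[:, 0] (and likewise for y and the negative positions) is a NumPy view, so the in-place assignments also modify the underlying position arrays. An equivalent and arguably clearer formulation uses np.clip; a small sketch on toy coordinates:

import numpy as np

positions = np.array([[-3, 5], [10, 260], [120, 90]])   # toy (x, y) positions
max_x, max_y = 199, 149
clipped = np.clip(positions, [0, 0], [max_x, max_y])    # same clamping effect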
Example #8
    def _train_batch(self, image_batch, increment=False, group=None,
                     verbose=False):
        r"""
        """
        # normalize images
        image_batch = rescale_images_to_reference_shape(
            image_batch, group, self.reference_shape, verbose=verbose)

        # build models at each scale
        if verbose:
            print_dynamic('- Training models\n')

        # for each level (low --> high)
        for i in range(self.n_scales):
            if verbose:
                if self.n_scales > 1:
                    prefix = '  - Scale {}: '.format(i)
                else:
                    prefix = '  - '
            else:
                prefix = None

            # Handle holistic features
            if i == 0 and self.holistic_features[i] == no_op:
                # Saves a lot of memory
                feature_images = image_batch
            elif (i == 0 or self.holistic_features[i] is not
                  self.holistic_features[i - 1]):
                # compute features only if this is the first pass through
                # the loop or the features at this scale are different from
                # the features at the previous scale
                feature_images = compute_features(image_batch,
                                                  self.holistic_features[i],
                                                  prefix=prefix,
                                                  verbose=verbose)
            # handle scales
            if self.scales[i] != 1:
                # scale feature images only if the scale is different from 1
                scaled_images = scale_images(feature_images,
                                             self.scales[i],
                                             prefix=prefix,
                                             verbose=verbose)
            else:
                scaled_images = feature_images

            # extract scaled shapes
            scaled_shapes = [image.landmarks[group].lms
                             for image in scaled_images]

            # train shape model
            if verbose:
                print_dynamic('{}Training shape model'.format(prefix))

            # TODO: This should be cleaned up by defining shape model classes
            if increment:
                increment_shape_model(
                    self.shape_models[i], scaled_shapes,
                    max_components=self.max_shape_components[i],
                    forgetting_factor=self.shape_forgetting_factor,
                    prefix=prefix, verbose=verbose)
            else:
                shape_model = build_shape_model(
                    scaled_shapes, max_components=self.max_shape_components[i],
                    prefix=prefix, verbose=verbose)
                self.shape_models.append(shape_model)

            # train expert ensemble
            if verbose:
                print_dynamic('{}Training expert ensemble'.format(prefix))

            if increment:
                self.expert_ensembles[i].increment(scaled_images,
                                                   scaled_shapes,
                                                   prefix=prefix,
                                                   verbose=verbose)
            else:
                expert_ensemble = self.expert_ensemble_cls[i](scaled_images,
                                                              scaled_shapes,
                                                              prefix=prefix,
                                                              verbose=verbose)
                self.expert_ensembles.append(expert_ensemble)

            if verbose:
                print_dynamic('{}Done\n'.format(prefix))
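The feature-handling branch in Example 8 only recomputes holistic features when they differ from the previous scale, and reuses the raw image batch when the first scale uses no_op. A toy sketch of that decision logic, with no_op and fake_features standing in for real feature callables:

def no_op(x):
    return x

def fake_features(x):
    return x

holistic_features = [no_op, fake_features, fake_features]
recompute = []
for i in range(len(holistic_features)):
    if i == 0 and holistic_features[i] == no_op:
        recompute.append(False)             # raw images reused, saves memory
    elif i == 0 or holistic_features[i] is not holistic_features[i - 1]:
        recompute.append(True)              # feature type changed: recompute
    else:
        recompute.append(False)             # same features as previous scale
# recompute == [False, True, False]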