Example #1
def _build_shape_model(shapes, max_components):
    r"""
    Builds a shape model given a set of shapes.

    Parameters
    ----------
    shapes: list of :map:`PointCloud`
        The set of shapes from which to build the model.
    max_components: None or int or float
        Specifies the number of components of the trained shape model.
        If int, it specifies the exact number of components to be retained.
        If float, it specifies the percentage of variance to be retained.
        If None, all the available components are kept (100% of variance).

    Returns
    -------
    shape_model: :map:`PCAModel`
        The PCA shape model.
    """
    # build shape model
    shape_model = PCAModel(shapes)
    if max_components is not None:
        # trim shape model if required
        shape_model.trim_components(max_components)

    return shape_model
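The max_components switch above accepts three forms: an int keeps an exact number of components, a float keeps a variance ratio, and None keeps everything. A minimal usage sketch, assuming menpo is installed and shapes is a list of PointClouds:

# Usage sketch for the three forms of max_components.
import numpy as np
from menpo.shape import PointCloud
from menpo.model import PCAModel

shapes = [PointCloud(np.random.randn(10, 2)) for _ in range(20)]

exact = PCAModel(shapes)
exact.trim_components(5)      # int: keep exactly 5 components

ratio = PCAModel(shapes)
ratio.trim_components(0.95)   # float: keep 95% of the variance

full = PCAModel(shapes)       # no trim: all components kept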
Example #2
def _build_appearance_model_full_yorgos(all_patches, n_appearance_parameters,
                                        patches_len, level_str, verbose):
    # build appearance model
    if verbose:
        print_dynamic('{}Training appearance distribution'.format(level_str))

    # get mean appearance vector
    n_images = len(all_patches)
    tmp = np.empty((patches_len, n_images))
    for c, i in enumerate(all_patches):
        tmp[..., c] = vectorize_patches_image(i)
    app_mean = np.mean(tmp, axis=1)

    # apply pca
    appearance_model = PCAModel(all_patches)

    # trim components
    if n_appearance_parameters is not None:
        appearance_model.trim_components(n_appearance_parameters)

    # compute I - W^T W: the projector onto the orthogonal complement of
    # the appearance subspace (not a sample covariance)
    app_cov = np.eye(
        appearance_model.n_features,
        appearance_model.n_features) - appearance_model.components.T.dot(
            appearance_model.components)

    return app_mean, app_cov
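Despite the original comment, app_cov here is not a sample covariance: with orthonormal component rows W, np.eye(F) - W.T.dot(W) is the projector onto the orthogonal complement of the appearance subspace. A small numpy sketch of that property:

# Sketch: with orthonormal rows W (n_components x n_features),
# P = I - W^T W projects onto the orthogonal complement of the subspace.
import numpy as np

W = np.linalg.qr(np.random.randn(10, 4))[0].T  # 4 orthonormal rows in R^10
P = np.eye(10) - W.T.dot(W)

assert np.allclose(P.dot(W.T), 0)  # annihilates the subspace
assert np.allclose(P.dot(P), P)    # idempotent, as a projector should be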
Example #3
def test_pca_trim():
    samples = [PointCloud(np.random.randn(10)) for _ in range(10)]
    model = PCAModel(samples)
    # trim components
    model.trim_components(5)
    # number of active components should be the same as number of components
    assert_equal(model.n_active_components, model.n_components)
Example #4
def build_shape_model(shapes, max_components=None):
    r"""
    Builds a shape model given a set of shapes.

    Parameters
    ----------
    shapes: list of :map:`PointCloud`
        The set of shapes from which to build the model.
    max_components: None or int or float
        Specifies the number of components of the trained shape model.
        If int, it specifies the exact number of components to be retained.
        If float, it specifies the percentage of variance to be retained.
        If None, all the available components are kept (100% of variance).

    Returns
    -------
    shape_model: :map:`PCAModel`
        The PCA shape model.
    """
    # centralize shapes
    centered_shapes = [Translation(-s.centre()).apply(s) for s in shapes]
    # align centralized shape using Procrustes Analysis
    gpa = GeneralizedProcrustesAnalysis(centered_shapes)
    aligned_shapes = [s.aligned_source() for s in gpa.transforms]

    # build shape model
    shape_model = PCAModel(aligned_shapes)
    if max_components is not None:
        # trim shape model if required
        shape_model.trim_components(max_components)

    return shape_model
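A sketch of how the returned model might be used to synthesise plausible novel shapes, assuming menpo's standard PCA instance() API and a shapes list as above:

# Sketch: draw a random plausible shape by scaling unit-normal weights by
# sqrt(eigenvalues) before calling instance().
import numpy as np

model = build_shape_model(shapes, max_components=0.98)
weights = np.random.randn(model.n_active_components)
novel_shape = model.instance(weights * np.sqrt(model.eigenvalues))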
Example #5
def _build_appearance_model_full(all_patches, n_appearance_parameters,
                                 patches_image_shape, level_str, verbose):
    # build appearance model
    if verbose:
        print_dynamic('{}Training appearance distribution'.format(level_str))

    # get mean appearance vector
    n_images = len(all_patches)
    tmp = np.empty(patches_image_shape + (n_images, ))
    for c, i in enumerate(all_patches):
        tmp[..., c] = i.pixels
    app_mean = np.mean(tmp, axis=-1)

    # apply pca
    appearance_model = PCAModel(all_patches)

    # trim components
    if n_appearance_parameters is not None:
        appearance_model.trim_components(n_appearance_parameters)

    # compute the inverse covariance (precision) restricted to the
    # appearance subspace
    app_cov = appearance_model.components.T.dot(
        np.diag(1 / appearance_model.eigenvalues)).dot(
            appearance_model.components)

    return app_mean, app_cov
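Here app_cov = W^T diag(1/eigenvalues) W is the precision (inverse covariance) restricted to the appearance subspace: for orthonormal rows W it is exactly the Moore-Penrose pseudo-inverse of the low-rank covariance W^T diag(eigenvalues) W. A numpy sketch of that identity:

# Sketch: W^T diag(1/lam) W is the pseudo-inverse of W^T diag(lam) W
# when the rows of W are orthonormal.
import numpy as np

W = np.linalg.qr(np.random.randn(10, 4))[0].T  # 4 orthonormal rows in R^10
lam = np.random.rand(4) + 0.1

cov = W.T.dot(np.diag(lam)).dot(W)
prec = W.T.dot(np.diag(1 / lam)).dot(W)

assert np.allclose(prec, np.linalg.pinv(cov))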
Example #6
    def _build_shape_model(cls, shapes, max_components):
        r"""
        Builds a shape model given a set of shapes.

        Parameters
        ----------
        shapes: list of :map:`PointCloud`
            The set of shapes from which to build the model.
        max_components: None or int or float
            Specifies the number of components of the trained shape model.
            If int, it specifies the exact number of components to be retained.
            If float, it specifies the percentage of variance to be retained.
            If None, all the available components are kept (100% of variance).

        Returns
        -------
        shape_model: :map:`PCAModel`
            The PCA shape model.
        """

        # centralize shapes
        centered_shapes = [Translation(-s.centre()).apply(s) for s in shapes]
        # align centralized shape using Procrustes Analysis
        gpa = GeneralizedProcrustesAnalysis(centered_shapes)
        aligned_shapes = [s.aligned_source() for s in gpa.transforms]
        # build shape model
        shape_model = PCAModel(aligned_shapes)
        if max_components is not None:
            # trim shape model if required
            shape_model.trim_components(max_components)

        return shape_model
Example #7
def test_pca_orthogonalize_against():
    pca_samples = [PointCloud(np.random.randn(10)) for _ in range(10)]
    pca_model = PCAModel(pca_samples)
    lm_samples = np.asarray([np.random.randn(10) for _ in range(4)])
    lm_model = LinearModel(np.asarray(lm_samples))
    # orthogonalize
    pca_model.orthonormalize_against_inplace(lm_model)
    # orthogonalizing against 4 directions of the 10-dim space leaves
    # 10 - 4 = 6 active components
    assert_equal(pca_model.n_active_components, 6)
Example #8
def test_pca_n_active_components_too_many():
    samples = [PointCloud(np.random.randn(10)) for _ in range(10)]
    model = PCAModel(samples)
    # too many components
    model.n_active_components = 100
    assert_equal(model.n_active_components, 9)
    # reset to a smaller number of components
    model.n_active_components = 5
    assert_equal(model.n_active_components, 5)
    # reset to too many components
    model.n_active_components = 100
    assert_equal(model.n_active_components, 9)
Example #9
def test_pca_increment_noncentred():
    pca_samples = [PointCloud(np.random.randn(10)) for _ in range(10)]
    ipca_model = PCAModel(pca_samples[:3], centre=False)
    ipca_model.increment(pca_samples[3:6])
    ipca_model.increment(pca_samples[6:])

    bpca_model = PCAModel(pca_samples, centre=False)

    assert_almost_equal(np.abs(ipca_model.components),
                        np.abs(bpca_model.components))
    assert_almost_equal(ipca_model.eigenvalues, bpca_model.eigenvalues)
    assert_almost_equal(ipca_model.mean_vector, bpca_model.mean_vector)
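increment() can also down-weight older batches; the AAM training code further down this page passes a forgetting_factor for exactly that purpose. A hedged sketch reusing pca_samples from above:

# Sketch: incremental update that down-weights the earlier batch
# (forgetting_factor as used by the _train_batch code later on this page).
ipca_model = PCAModel(pca_samples[:5], centre=False)
ipca_model.increment(pca_samples[5:], forgetting_factor=0.9)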
Example #10
    def __init__(self, data, max_n_components=None):
        if isinstance(data, PCAModel):
            shape_model = data
        else:
            aligned_shapes = align_shapes(data)
            shape_model = PCAModel(aligned_shapes)

        if max_n_components is not None:
            shape_model.trim_components(max_n_components)
        super(PDM, self).__init__(shape_model)
        # Default target is the mean
        self._target = self.model.mean()
Example #11
def test_pca_increment_centred():
    pca_samples = [PointCloud(np.random.randn(10, 2)) for _ in range(10)]
    ipca_model = PCAModel(pca_samples[:3])
    ipca_model.increment(pca_samples[3:6])
    ipca_model.increment(pca_samples[6:])

    bpca_model = PCAModel(pca_samples)

    assert_almost_equal(np.abs(ipca_model.components),
                        np.abs(bpca_model.components))
    assert_almost_equal(ipca_model.eigenvalues, bpca_model.eigenvalues)
    assert_almost_equal(ipca_model.mean().as_vector(),
                        bpca_model.mean().as_vector())
Example #12
def test_pca_variance_after_trim():
    samples = [PointCloud(np.random.randn(10)) for _ in range(10)]
    model = PCAModel(samples)
    # set number of active components
    model.trim_components(5)
    # kept variance must be smaller than total variance
    assert(model.variance() < model.original_variance())
    # kept variance ratio must be smaller than 1.0
    assert(model.variance_ratio() < 1.0)
    # noise variance must be bigger than 0.0
    assert(model.noise_variance() > 0.0)
    # noise variance ratio must also be bigger than 0.0
    assert(model.noise_variance_ratio() > 0.0)
    # inverse noise variance is computable
    assert(model.inverse_noise_variance() == 1 / model.noise_variance())
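The ratio quantities are the plain quantities normalised by the original (pre-trim) variance, so after trimming they tie together as below (a sketch, assuming menpo's definitions):

# Sketch: each *_ratio is the corresponding quantity divided by
# original_variance().
import numpy as np

assert np.isclose(model.variance_ratio(),
                  model.variance() / model.original_variance())
assert np.isclose(model.noise_variance_ratio(),
                  model.noise_variance() / model.original_variance())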
Example #13
def test_pca_variance_after_trim():
    samples = [PointCloud(np.random.randn(10)) for _ in range(10)]
    model = PCAModel(samples)
    # set number of active components
    model.trim_components(5)
    # kept variance must be smaller than total variance
    assert (model.variance < model.original_variance)
    # kept variance ratio must be smaller than 1.0
    assert (model.variance_ratio < 1.0)
    # noise variance must be bigger than 0.0
    assert (model.noise_variance > 0.0)
    # noise variance ratio must also be bigger than 0.0
    assert (model.noise_variance_ratio > 0.0)
    # inverse noise variance is computable
    assert (model.inverse_noise_variance == 1 / model.noise_variance)
Example #14
def _build_shape_model(shapes, graph_shape, max_components, verbose=False):
    r"""
    Builds a shape model given a set of shapes.

    Parameters
    ----------
    shapes: list of :map:`PointCloud`
        The set of shapes from which to build the model.
    graph_shape: graph or None
        If a graph is provided, its adjacency array is used to train a
        SparsePCAModel; otherwise a standard PCAModel is trained.
    max_components: None or int or float
        Specifies the number of components of the trained shape model.
        If int, it specifies the exact number of components to be retained.
        If float, it specifies the percentage of variance to be retained.
        If None, all the available components are kept (100% of variance).

    Returns
    -------
    shape_model: :map:`PCAModel`
        The PCA shape model.
    """
    # build shape model
    if graph_shape is not None:
        shape_model = SparsePCAModel(graph_shape.adjacency_array,
                                     shapes,
                                     2,
                                     verbose=verbose)
    else:
        shape_model = PCAModel(shapes)
    if max_components is not None:
        # trim shape model if required
        shape_model.trim_components(max_components)

    return shape_model
Example #15
def load_n_create_generator(pattern,
                            detector_name,
                            group=None,
                            overwrite=False):
    # from menpo.landmark import LandmarkGroup
    from menpo.model import PCAModel
    try:
        cur_detector = _DETECTORS[detector_name]()
    except KeyError:
        detector_list = ', '.join(list(_DETECTORS.keys()))
        raise ValueError('Valid detector types are: {}'.format(detector_list))
    print('Running {} detector on {}'.format(detector_name, pattern))
    bboxes = [
        (img, detect_and_check(img, cur_detector, group=group))
        for img in mio.import_images(pattern, normalise=False, verbose=True)
    ]

    # find all the detections that did not fail
    detections = list(filter(lambda x: x[1] is not None, bboxes))

    print('Creating a model out of {} detections.'.format(len(detections)))
    # normalize these to size [1, 1], centred on origin
    normed_detections = [
        normalize(im.landmarks[group].bounding_box()).apply(det)
        for im, det in detections
    ]

    # build a PCA model from good detections
    pca = PCAModel(normed_detections)

    mio.export_pickle(pca,
                      '{}_gen.pkl'.format(detector_name),
                      overwrite=overwrite)
Example #16
def load_tassos_lsfm_combined_model(path):
    m = loadmat(str(path))
    mean = TriMesh(m['mean'].reshape([-1, 3]), trilist=m['trilist'])
    return {
        'shape_model': PCAModel.init_from_components(
            m['components'].T,  m['eigenvalues'].ravel(),
            mean, 8888, True),
        'n_id_comps': int(m['n_trunc_ids'][0][0]),
        'n_exp_comps': int(m['n_trunc_expressions'][0][0])
    }
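init_from_components takes the raw PCA pieces positionally (components, eigenvalues, mean, n_samples, centred), so a model can be round-tripped; a minimal sketch:

# Sketch: rebuild a PCAModel from its own raw components and compare.
import numpy as np
from menpo.shape import PointCloud
from menpo.model import PCAModel

samples = [PointCloud(np.random.randn(10, 2)) for _ in range(10)]
model = PCAModel(samples)
rebuilt = PCAModel.init_from_components(model.components, model.eigenvalues,
                                        model.mean(), model.n_samples, True)
assert np.allclose(rebuilt.components, model.components)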
Example #17
def _build_appearance_model_full(all_patches, n_appearance_parameters,
                                 level_str, verbose):
    # build appearance model
    if verbose:
        print_dynamic('{}Training appearance distribution'.format(level_str))

    # apply pca
    appearance_model = PCAModel(all_patches)

    # trim components
    if n_appearance_parameters is not None:
        appearance_model.trim_components(n_appearance_parameters)

    # get mean appearance vector
    app_mean = appearance_model.mean().as_vector()

    # compute the inverse covariance (precision) restricted to the
    # appearance subspace
    app_cov = appearance_model.components.T.dot(
        np.diag(1 / appearance_model.eigenvalues)).dot(
            appearance_model.components)

    return app_mean, app_cov
Example #18
def test_pca_variance():
    samples = [PointCloud(np.random.randn(10)) for _ in range(10)]
    model = PCAModel(samples)
    # kept variance must be equal to total variance
    assert_equal(model.variance, model.original_variance)
    # kept variance ratio must be 1.0
    assert_equal(model.variance_ratio, 1.0)
    # noise variance must be 0.0
    assert_equal(model.noise_variance, 0.0)
    # noise variance ratio must be also 0.0
    assert_equal(model.noise_variance_ratio, 0.0)
Example #19
def create_generator(shapes, detections):
    # from menpo.landmark import LandmarkGroup
    from menpo.model import PCAModel

    # normalize these to size [1, 1], centred on origin
    normed_detections = [
        normalize(lms.bounding_box()).apply(det)
        for lms, det in zip(shapes, detections)
    ]

    # build a PCA model from good detections
    return PCAModel(normed_detections)
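The normalize helper is not defined in this snippet; one plausible stand-in (hypothetical, not the original implementation) maps a bounding box to a unit box centred on the origin:

# Hypothetical stand-in for the undefined normalize() helper above.
from menpo.transform import Translation, Scale

def normalize(bbox):
    # translate the box centre to the origin, then scale each axis so the
    # box extents become 1
    return Translation(-bbox.centre()).compose_before(
        Scale(1.0 / bbox.range()))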
Example #20
    def pca_prune(self, meshes):
        """
        Run PCA on TriMesh features.

        Parameters:
            meshes (list of TriMesh): meshes on which to run PCA

        Returns:
            PCA model
        """
        # process mesh files to have the same number of points
        meshes = self.analyse_meshes(meshes)

        pca_model = PCAModel(samples=meshes, verbose=True)
        n_comps_retained = int(sum(pca_model.eigenvalues_cumulative_ratio() < self.n_components)) if \
            self.n_components >= 1 else self.n_components
        if self.verbose:
            print(
                '\nRetaining {:.2%} of eigenvalues keeps {} components'.format(
                    self.n_components, n_comps_retained))
        pca_model.trim_components(self.n_components)
        print(
            "Final PCA Model:\n# of components: {}\n# of points for each mesh (3 dims total): {}\n"
            "eigen value accumulative ratios: {}".format(
                str(pca_model.components.shape[0]),
                str(pca_model.components.shape[1]),
                str(pca_model.eigenvalues_cumulative_ratio())))
        return pca_model
Example #21
def _build_appearance_model_full(all_patches, n_appearance_parameters,
                                 patches_len, level_str, verbose):
    # build appearance model
    if verbose:
        print_dynamic('{}Training appearance distribution'.format(level_str))

    # get mean appearance vector
    n_images = len(all_patches)
    tmp = np.empty((patches_len, n_images))
    for c, i in enumerate(all_patches):
        tmp[..., c] = vectorize_patches_image(i)
    app_mean = np.mean(tmp, axis=1)

    # apply pca
    appearance_model = PCAModel(all_patches)

    # trim components
    if n_appearance_parameters is not None:
        appearance_model.trim_components(n_appearance_parameters)

    # compute the inverse covariance (precision) restricted to the
    # appearance subspace
    app_cov = appearance_model.components.T.dot(
        np.diag(1 / appearance_model.eigenvalues)).dot(
            appearance_model.components)

    return app_mean, app_cov
Example #22
def _build_appearance_model_full_yorgos(all_patches, n_appearance_parameters,
                                        patches_image_shape, level_str, verbose):
    # build appearance model
    if verbose:
        print_dynamic('{}Training appearance distribution'.format(level_str))

    # get mean appearance vector
    n_images = len(all_patches)
    tmp = np.empty(patches_image_shape + (n_images,))
    for c, i in enumerate(all_patches):
        tmp[..., c] = i.pixels
    app_mean = np.mean(tmp, axis=-1)

    # apply pca
    appearance_model = PCAModel(all_patches)

    # trim components
    if n_appearance_parameters is not None:
        appearance_model.trim_components(n_appearance_parameters)

    # compute I - W^T W: the projector onto the orthogonal complement of
    # the appearance subspace (not a sample covariance)
    app_cov = np.eye(
        appearance_model.n_features,
        appearance_model.n_features) - appearance_model.components.T.dot(
            appearance_model.components)

    return app_mean, app_cov
Example #23
def test_pca_variance():
    samples = [PointCloud(np.random.randn(10)) for _ in range(10)]
    model = PCAModel(samples)
    # kept variance must be equal to total variance
    assert_equal(model.variance(), model.original_variance())
    # kept variance ratio must be 1.0
    assert_equal(model.variance_ratio(), 1.0)
    # noise variance must be 0.0
    assert_equal(model.noise_variance(), 0.0)
    # noise variance ratio must be also 0.0
    assert_equal(model.noise_variance_ratio(), 0.0)
Example #24
def pca_and_weights(meshes, retain_eig_cum_val=0.997, verbose=False):
    model = PCAModel(meshes, verbose=verbose)
    n_comps_retained = (model.eigenvalues_cumulative_ratio() <
                        retain_eig_cum_val).sum()
    if verbose:
        print('\nRetaining {:.2%} of eigenvalues keeps {} components'.format(
            retain_eig_cum_val, n_comps_retained))
    model.trim_components(retain_eig_cum_val)
    if verbose:
        meshes = print_progress(meshes, prefix='Calculating weights')
    weights = (np.vstack([model.project(m)
                          for m in meshes]) / np.sqrt(model.eigenvalues))
    return model, weights
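Usage sketch, assuming meshes is a list of TriMeshes: the returned weights hold one whitened coefficient row per mesh.

# Sketch: weights are PCA projections divided by sqrt(eigenvalues),
# i.e. roughly unit-variance coordinates, one row per mesh.
model, weights = pca_and_weights(meshes, retain_eig_cum_val=0.997)
assert weights.shape == (len(meshes), model.n_active_components)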
Example #25
    def build(self, images, group=None, label=None, verbose=False):
        r"""
        Builds a Multilevel Active Appearance Model from a list of
        landmarked images.

        Parameters
        ----------
        images : list of :map:`MaskedImage`
            The set of landmarked images from which to build the AAM.

        group : `string`, optional
            The key of the landmark set that should be used. If ``None``,
            and if there is only one set of landmarks, this set will be used.

        label : `string`, optional
            The label of the landmark manager that you wish to use. If no
            label is passed, the convex hull of all landmarks is used.

        verbose : `boolean`, optional
            Flag that controls information and progress printing.

        Returns
        -------
        aam : :map:`AAM`
            The AAM object. Shape and appearance models are stored from lowest
            to highest level
        """
        # compute reference_shape and normalize images size
        self.reference_shape, normalized_images = \
            normalization_wrt_reference_shape(images, group, label,
                                              self.normalization_diagonal,
                                              verbose=verbose)

        # create pyramid
        generators = create_pyramid(normalized_images,
                                    self.n_levels,
                                    self.downscale,
                                    self.features,
                                    verbose=verbose)

        # build the model at each pyramid level
        if verbose:
            if self.n_levels > 1:
                print_dynamic('- Building model for each of the {} pyramid '
                              'levels\n'.format(self.n_levels))
            else:
                print_dynamic('- Building model\n')

        shape_models = []
        appearance_models = []
        # for each pyramid level (high --> low)
        for j in range(self.n_levels):
            # since models are built from highest to lowest level, the
            # per-level parameter lists must be indexed in reverse
            rj = self.n_levels - j - 1

            if verbose:
                level_str = '  - '
                if self.n_levels > 1:
                    level_str = '  - Level {}: '.format(j + 1)

            # get feature images of current level
            feature_images = []
            for c, g in enumerate(generators):
                if verbose:
                    print_dynamic(
                        '{}Computing feature space/rescaling - {}'.format(
                            level_str,
                            progress_bar_str((c + 1.) / len(generators),
                                             show_bar=False)))
                feature_images.append(next(g))

            # extract potentially rescaled shapes
            shapes = [i.landmarks[group][label] for i in feature_images]

            # define shapes that will be used for training
            if j == 0:
                original_shapes = shapes
                train_shapes = shapes
            else:
                if self.scaled_shape_models:
                    train_shapes = shapes
                else:
                    train_shapes = original_shapes

            # train shape model and find reference frame
            if verbose:
                print_dynamic('{}Building shape model'.format(level_str))
            shape_model = build_shape_model(train_shapes,
                                            self.max_shape_components[rj])
            reference_frame = self._build_reference_frame(shape_model.mean())

            # add shape model to the list
            shape_models.append(shape_model)

            # compute transforms
            if verbose:
                print_dynamic('{}Computing transforms'.format(level_str))
            transforms = [
                self.transform(reference_frame.landmarks['source'].lms,
                               i.landmarks[group][label])
                for i in feature_images
            ]

            # warp images to reference frame
            warped_images = []
            for c, (i, t) in enumerate(zip(feature_images, transforms)):
                if verbose:
                    print_dynamic('{}Warping images - {}'.format(
                        level_str,
                        progress_bar_str(float(c + 1) / len(feature_images),
                                         show_bar=False)))
                warped_images.append(i.warp_to_mask(reference_frame.mask, t))

            # attach reference_frame to images' source shape
            for i in warped_images:
                i.landmarks['source'] = reference_frame.landmarks['source']

            # build appearance model
            if verbose:
                print_dynamic('{}Building appearance model'.format(level_str))
            appearance_model = PCAModel(warped_images)
            # trim appearance model if required
            if self.max_appearance_components[rj] is not None:
                appearance_model.trim_components(
                    self.max_appearance_components[rj])

            # add appearance model to the list
            appearance_models.append(appearance_model)

            if verbose:
                print_dynamic('{}Done\n'.format(level_str))

        # reverse the list of shape and appearance models so that they are
        # ordered from lower to higher resolution
        shape_models.reverse()
        appearance_models.reverse()
        n_training_images = len(images)

        return self._build_aam(shape_models, appearance_models,
                               n_training_images)
Example #26
    def _train(self, images, group=None, verbose=False):
        checks.check_landmark_trilist(images[0], self.transform, group=group)
        self.reference_shape = compute_reference_shape(
            [i.landmarks[group] for i in images],
            self.diagonal,
            verbose=verbose)

        # normalize images
        images = rescale_images_to_reference_shape(images,
                                                   group,
                                                   self.reference_shape,
                                                   verbose=verbose)
        if self.sigma:
            images = [fsmooth(i, self.sigma) for i in images]

        # Build models at each scale
        if verbose:
            print_dynamic('- Building models\n')

        feature_images = []
        # for each scale (low --> high)
        for j in range(self.n_scales):
            if verbose:
                if len(self.scales) > 1:
                    scale_prefix = '  - Scale {}: '.format(j)
                else:
                    scale_prefix = '  - '
            else:
                scale_prefix = None

            # Handle holistic features
            if j == 0 and self.holistic_features[j] == no_op:
                # Saves a lot of memory
                feature_images = images
            elif j == 0 or self.holistic_features[
                    j] is not self.holistic_features[j - 1]:
                # Compute features only if this is the first pass through
                # the loop or the features at this scale are different from
                # the features at the previous scale
                feature_images = compute_features(images,
                                                  self.holistic_features[j],
                                                  prefix=scale_prefix,
                                                  verbose=verbose)
            # handle scales
            if self.scales[j] != 1:
                # Scale feature images only if scale is different than 1
                scaled_images = scale_images(feature_images,
                                             self.scales[j],
                                             prefix=scale_prefix,
                                             verbose=verbose)
            else:
                scaled_images = feature_images

            # Extract potentially rescaled shapes
            scale_shapes = [i.landmarks[group] for i in scaled_images]

            # Build the shape model
            if verbose:
                print_dynamic('{}Building shape model'.format(scale_prefix))

            shape_model = self._build_shape_model(scale_shapes, j)
            self.shape_models.append(shape_model)

            # Obtain warped images - we use a scaled version of the
            # reference shape, computed here. This is because the mean
            # moves when we are incrementing, and we need a consistent
            # reference frame.
            scaled_reference_shape = Scale(self.scales[j], n_dims=2).apply(
                self.reference_shape)
            warped_images = self._warp_images(scaled_images, scale_shapes,
                                              scaled_reference_shape, j,
                                              scale_prefix, verbose)

            # obtain appearance model
            if verbose:
                print_dynamic(
                    '{}Building appearance model'.format(scale_prefix))

            appearance_model = PCAModel(warped_images)
            # trim appearance model if required
            if self.max_appearance_components[j] is not None:
                appearance_model.trim_components(
                    self.max_appearance_components[j])
            # add appearance model to the list
            self.appearance_models.append(appearance_model)

            expert_ensemble = self.expert_ensemble_cls[j](
                images=scaled_images,
                shapes=scale_shapes,
                patch_shape=self.patch_shape[j],
                patch_normalisation=self.patch_normalisation,
                cosine_mask=self.cosine_mask,
                context_shape=self.context_shape[j],
                sample_offsets=self.sample_offsets,
                prefix=scale_prefix,
                verbose=verbose)
            self.expert_ensembles.append(expert_ensemble)

            if verbose:
                print_dynamic('{}Done\n'.format(scale_prefix))
Example #27
def test_pca_init_from_covariance():
    n_samples = 30
    n_features = 10
    n_dims = 2
    centre_values = [True, False]
    for centre in centre_values:
        # generate samples list and convert it to nd.array
        samples = [PointCloud(np.random.randn(n_features, n_dims))
                   for _ in range(n_samples)]
        data, template = as_matrix(samples, return_template=True)
        # compute covariance matrix and mean
        if centre:
            mean_vector = np.mean(data, axis=0)
            mean = template.from_vector(mean_vector)
            X = data - mean_vector
            C = np.dot(X.T, X) / (n_samples - 1)
        else:
            mean = samples[0]
            C = np.dot(data.T, data) / (n_samples - 1)
        # create the 2 pca models
        pca1 = PCAModel.init_from_covariance_matrix(C, mean,
                                                    centred=centre,
                                                    n_samples=n_samples)
        pca2 = PCAModel(samples, centre=centre)
        # compare them
        assert_array_almost_equal(pca1.component_vector(0, with_mean=False),
                                  pca2.component_vector(0, with_mean=False))
        assert_array_almost_equal(pca1.component(7).as_vector(),
                                  pca2.component(7).as_vector())
        assert_array_almost_equal(pca1.components, pca2.components)
        assert_array_almost_equal(pca1.eigenvalues, pca2.eigenvalues)
        assert_array_almost_equal(pca1.eigenvalues_cumulative_ratio(),
                                  pca2.eigenvalues_cumulative_ratio())
        assert_array_almost_equal(pca1.eigenvalues_ratio(),
                                  pca2.eigenvalues_ratio())
        weights = np.random.randn(pca1.n_active_components)
        assert_array_almost_equal(pca1.instance(weights).as_vector(),
                                  pca2.instance(weights).as_vector())
        weights2 = np.random.randn(pca1.n_active_components - 4)
        assert_array_almost_equal(pca1.instance_vector(weights2),
                                  pca2.instance_vector(weights2))
        assert_array_almost_equal(pca1.mean().as_vector(),
                                  pca2.mean().as_vector())
        assert_array_almost_equal(pca1.mean_vector,
                                  pca2.mean_vector)
        assert(pca1.n_active_components == pca2.n_active_components)
        assert(pca1.n_components == pca2.n_components)
        assert(pca1.n_features == pca2.n_features)
        assert(pca1.n_samples == pca2.n_samples)
        assert(pca1.noise_variance() == pca2.noise_variance())
        assert(pca1.noise_variance_ratio() == pca2.noise_variance_ratio())
        assert_almost_equal(pca1.variance(), pca2.variance())
        assert_almost_equal(pca1.variance_ratio(), pca2.variance_ratio())
        assert_array_almost_equal(pca1.whitened_components(),
                                  pca2.whitened_components())
Example #28
def test_pca_inverse_noise_variance():
    samples = [PointCloud(np.random.randn(10)) for _ in range(10)]
    model = PCAModel(samples)
    # inverse noise variance is not computable (noise variance is 0 here)
    model.inverse_noise_variance()
Example #29
def test_pca_n_active_components():
    samples = [PointCloud(np.random.randn(10)) for _ in range(10)]
    model = PCAModel(samples)
    # integer
    model.n_active_components = 5
    assert_equal(model.n_active_components, 5)
Example #30
def test_pca_trim_negative_float():
    samples = [PointCloud(np.random.randn(10)) for _ in range(10)]
    model = PCAModel(samples)
    # no negative number of components
    model.trim_components(-2)
Example #31
    def _train_batch(self,
                     image_batch,
                     increment=False,
                     group=None,
                     verbose=False,
                     shape_forgetting_factor=1.0,
                     appearance_forgetting_factor=1.0):
        r"""
        Builds an Active Appearance Model from a list of landmarked images.

        Parameters
        ----------
        images : list of :map:`MaskedImage`
            The set of landmarked images from which to build the AAM.
        group : `string`, optional
            The key of the landmark set that should be used. If ``None``,
            and if there is only one set of landmarks, this set will be used.
        verbose : `boolean`, optional
            Flag that controls information and progress printing.

        Returns
        -------
        aam : :map:`AAM`
            The AAM object. Shape and appearance models are stored from
            lowest to highest scale
        """
        # Rescale to existing reference shape
        (image_batch, self.transforms, self.reference_frame,
         self.n_landmarks, self.n_align_lms, _, _, _,
         self.reference_shape, self.debug) = \
            rescale_images_to_reference_shape(
                image_batch, group, self.reference_shape,
                tight_mask=self.tight_mask, sd=self.shape_desc,
                target_group=self.target_group, verbose=verbose)

        self.normalised_img = image_batch

        # build models at each scale
        if verbose:
            print_dynamic('- Building models\n')

        feature_images = []
        # for each scale (low --> high)
        for j in range(self.n_scales):
            if verbose:
                if len(self.scales) > 1:
                    scale_prefix = '  - Scale {}: '.format(j)
                else:
                    scale_prefix = '  - '
            else:
                scale_prefix = None

            # Handle holistic features
            if j == 0 and self.holistic_features[j] == no_op:
                # Saves a lot of memory
                feature_images = image_batch
            elif j == 0 or self.holistic_features[
                    j] is not self.holistic_features[j - 1]:
                # Compute features only if this is the first pass through
                # the loop or the features at this scale are different from
                # the features at the previous scale
                feature_images = compute_features(image_batch,
                                                  self.holistic_features[j],
                                                  prefix=scale_prefix,
                                                  verbose=verbose)
            # handle scales
            scaled_images = feature_images

            # Extract potentially rescaled shapes
            scale_shapes = [i.landmarks[group].lms for i in scaled_images]

            # Build the shape model
            if verbose:
                print_dynamic('{}Building shape model'.format(scale_prefix))

            if j == 0:
                shape_model = self._build_shape_model(scale_shapes, j)
                self.shape_models.append(shape_model)
            else:
                self.shape_models.append(deepcopy(shape_model))

            # Obtain warped images - we use a scaled version of the
            # reference shape, computed here. This is because the mean
            # moves when we are incrementing, and we need a consistent
            # reference frame.
            warped_images = self.warped_images = self._warp_images(
                scaled_images, scale_shapes, self.reference_shape, j,
                scale_prefix, verbose)

            # obtain appearance model
            if verbose:
                print_dynamic(
                    '{}Building appearance model'.format(scale_prefix))

            appearance_model = PCAModel(warped_images)
            # trim appearance model if required
            if self.max_appearance_components is not None:
                appearance_model.trim_components(
                    self.max_appearance_components[j])
            # add appearance model to the list
            self.appearance_models.append(appearance_model)

            if verbose:
                print_dynamic('{}Done\n'.format(scale_prefix))

        # Because we just copy the shape model, we need to wait to trim
        # it after building each model. This ensures we can have a different
        # number of components per level
        for j, sm in enumerate(self.shape_models):
            max_sc = self.max_shape_components[j]
            if max_sc is not None:
                sm.trim_components(max_sc)
Example #32
def test_pca_inverse_noise_variance():
    samples = [PointCloud(np.random.randn(10)) for _ in range(10)]
    model = PCAModel(samples)
    # inverse noise variance is not computable (noise variance is 0 here)
    model.inverse_noise_variance
Example #33
def test_pca_trim_negative_integers():
    samples = [PointCloud(np.random.randn(10)) for _ in range(10)]
    model = PCAModel(samples)
    with raises(ValueError):
        # no negative number of components
        model.trim_components(-2)
Example #34
    def _train_batch(self, image_batch, increment=False, group=None,
                     verbose=False, shape_forgetting_factor=1.0,
                     appearance_forgetting_factor=1.0):
        r"""
        Builds an Active Appearance Model from a list of landmarked images.

        Parameters
        ----------
        images : list of :map:`MaskedImage`
            The set of landmarked images from which to build the AAM.
        group : `string`, optional
            The key of the landmark set that should be used. If ``None``,
            and if there is only one set of landmarks, this set will be used.
        verbose : `boolean`, optional
            Flag that controls information and progress printing.

        Returns
        -------
        aam : :map:`AAM`
            The AAM object. Shape and appearance models are stored from
            lowest to highest scale
        """
        # Rescale to existing reference shape
        image_batch = rescale_images_to_reference_shape(
            image_batch, group, self.reference_shape,
            verbose=verbose)

        # build models at each scale
        if verbose:
            print_dynamic('- Building models\n')

        feature_images = []
        # for each scale (low --> high)
        for j in range(self.n_scales):
            if verbose:
                if len(self.scales) > 1:
                    scale_prefix = '  - Scale {}: '.format(j)
                else:
                    scale_prefix = '  - '
            else:
                scale_prefix = None

            # Handle holistic features
            if j == 0 and self.holistic_features[j] == no_op:
                # Saves a lot of memory
                feature_images = image_batch
            elif j == 0 or self.holistic_features[j] is not self.holistic_features[j - 1]:
                # Compute features only if this is the first pass through
                # the loop or the features at this scale are different from
                # the features at the previous scale
                feature_images = compute_features(image_batch,
                                                  self.holistic_features[j],
                                                  prefix=scale_prefix,
                                                  verbose=verbose)
            # handle scales
            if self.scales[j] != 1:
                # Scale feature images only if scale is different than 1
                scaled_images = scale_images(feature_images, self.scales[j],
                                             prefix=scale_prefix,
                                             verbose=verbose)
            else:
                scaled_images = feature_images

            # Extract potentially rescaled shapes
            scale_shapes = [i.landmarks[group].lms for i in scaled_images]

            # Build the shape model
            if verbose:
                print_dynamic('{}Building shape model'.format(scale_prefix))

            if not increment:
                shape_model = self._build_shape_model(scale_shapes, j)
                self.shape_models.append(shape_model)
            else:
                self._increment_shape_model(
                    scale_shapes, j, forgetting_factor=shape_forgetting_factor)

            # Obtain warped images - we use a scaled version of the
            # reference shape, computed here. This is because the mean
            # moves when we are incrementing, and we need a consistent
            # reference frame.
            scaled_reference_shape = Scale(self.scales[j], n_dims=2).apply(
                self.reference_shape)
            warped_images = self._warp_images(scaled_images, scale_shapes,
                                              scaled_reference_shape,
                                              j, scale_prefix, verbose)

            # obtain appearance model
            if verbose:
                print_dynamic('{}Building appearance model'.format(
                    scale_prefix))

            if not increment:
                appearance_model = PCAModel(warped_images)
                # trim appearance model if required
                if self.max_appearance_components is not None:
                    appearance_model.trim_components(
                        self.max_appearance_components[j])
                # add appearance model to the list
                self.appearance_models.append(appearance_model)
            else:
                # increment appearance model
                self.appearance_models[j].increment(
                    warped_images,
                    forgetting_factor=appearance_forgetting_factor)
                # trim appearance model if required
                if self.max_appearance_components is not None:
                    self.appearance_models[j].trim_components(
                        self.max_appearance_components[j])

            if verbose:
                print_dynamic('{}Done\n'.format(scale_prefix))
Example #35
def test_pca_trim_variance_limit():
    samples = [PointCloud(np.random.randn(10)) for _ in range(10)]
    model = PCAModel(samples)
    # cannot keep a variance ratio greater than 1.0
    model.trim_components(2.5)
Example #36
def test_pca_project():
    pca_samples = [PointCloud(np.random.randn(10, 2)) for _ in range(10)]
    pca_model = PCAModel(pca_samples)
    projected = pca_model.project(pca_samples[0])
    assert projected.shape[0] == 9
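Note on the expected length: 10 centred samples span at most a 9-dimensional subspace, so the model holds 9 components and project() returns one coefficient per component.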
Example #37
    def build(self, images, group=None, label='all'):
        r"""
        Builds a Multilevel Active Appearance Model from a list of
        landmarked images.

        Parameters
        ----------
        images: list of :class:`menpo.image.Image`
            The set of landmarked images from which to build the AAM.

        group : string, Optional
            The key of the landmark set that should be used. If None,
            and if there is only one set of landmarks, this set will be used.

            Default: None

        label: string, Optional
            The label of the landmark manager that you wish to use. If no
            label is passed, the convex hull of all landmarks is used.

            Default: 'all'

        Returns
        -------
        aam : :class:`menpo.fitmultiple.aam.builder.AAM`
            The AAM object
        """
        print('- Preprocessing')
        self.reference_shape, generator = self._preprocessing(
            images, group, label, self.diagonal_range, self.interpolator,
            self.scaled_levels, self.n_levels, self.downscale)

        print('- Building model pyramids')
        shape_models = []
        appearance_models = []
        # for each level
        for j in np.arange(self.n_levels):
            print(' - Level {}'.format(j))

            print('  - Computing feature space')
            images = [compute_features(next(g), self.feature_type)
                      for g in generator]
            # extract potentially rescaled shapes
            shapes = [i.landmarks[group][label].lms for i in images]

            if j == 0 or self.scaled_levels:
                print('  - Building shape model')
                if j != 0:
                    shapes = [Scale(1/self.downscale,
                                    n_dims=shapes[0].n_dims).apply(s)
                              for s in shapes]
                shape_model = self._build_shape_model(
                    shapes, self.max_shape_components)

                print('  - Building reference frame')
                reference_frame = self._build_reference_frame(
                    shape_model.mean)

            # add shape model to the list
            shape_models.append(shape_model)

            print('  - Computing transforms')
            transforms = [self.transform(reference_frame.landmarks['source'].lms,
                                         i.landmarks[group][label].lms)
                          for i in images]

            print('  - Warping images')
            images = [i.warp_to(reference_frame.mask, t,
                                interpolator=self.interpolator)
                      for i, t in zip(images, transforms)]

            for i in images:
                i.landmarks['source'] = reference_frame.landmarks['source']
                self._mask_image(i)

            print('  - Building appearance model')
            appearance_model = PCAModel(images)
            # trim appearance model if required
            if self.max_appearance_components is not None:
                appearance_model.trim_components(
                    self.max_appearance_components)

            # add appearance model to the list
            appearance_models.append(appearance_model)

        # reverse the list of shape and appearance models so that they are
        # ordered from lower to higher resolution
        shape_models.reverse()
        appearance_models.reverse()

        return self._build_aam(shape_models, appearance_models)
Example #38
def test_pca_n_active_components_negative():
    samples = [PointCloud(np.random.randn(10)) for _ in range(10)]
    model = PCAModel(samples)
    # a negative number of components is invalid
    model.n_active_components = -5
Example #39
def test_pca_init_from_covariance():
    n_samples = 30
    n_features = 10
    n_dims = 2
    centre_values = [True, False]
    for centre in centre_values:
        # generate samples list and convert it to nd.array
        samples = [
            PointCloud(np.random.randn(n_features, n_dims))
            for _ in range(n_samples)
        ]
        data, template = as_matrix(samples, return_template=True)
        # compute covariance matrix and mean
        if centre:
            mean_vector = np.mean(data, axis=0)
            mean = template.from_vector(mean_vector)
            X = data - mean_vector
            C = np.dot(X.T, X) / (n_samples - 1)
        else:
            mean = samples[0]
            C = np.dot(data.T, data) / (n_samples - 1)
        # create the 2 pca models
        pca1 = PCAModel.init_from_covariance_matrix(C,
                                                    mean,
                                                    centred=centre,
                                                    n_samples=n_samples)
        pca2 = PCAModel(samples, centre=centre)
        # compare them
        assert_array_almost_equal(
            pca1.component_vector(0, with_mean=False),
            pca2.component_vector(0, with_mean=False),
        )
        assert_array_almost_equal(
            pca1.component(7).as_vector(),
            pca2.component(7).as_vector())
        assert_array_almost_equal(pca1.components, pca2.components)
        assert_array_almost_equal(pca1.eigenvalues, pca2.eigenvalues)
        assert_array_almost_equal(pca1.eigenvalues_cumulative_ratio(),
                                  pca2.eigenvalues_cumulative_ratio())
        assert_array_almost_equal(pca1.eigenvalues_ratio(),
                                  pca2.eigenvalues_ratio())
        weights = np.random.randn(pca1.n_active_components)
        assert_array_almost_equal(
            pca1.instance(weights).as_vector(),
            pca2.instance(weights).as_vector())
        weights2 = np.random.randn(pca1.n_active_components - 4)
        assert_array_almost_equal(pca1.instance_vector(weights2),
                                  pca2.instance_vector(weights2))
        assert_array_almost_equal(pca1.mean().as_vector(),
                                  pca2.mean().as_vector())
        assert_array_almost_equal(pca1.mean_vector, pca2.mean_vector)
        assert pca1.n_active_components == pca2.n_active_components
        assert pca1.n_components == pca2.n_components
        assert pca1.n_features == pca2.n_features
        assert pca1.n_samples == pca2.n_samples
        assert pca1.noise_variance() == pca2.noise_variance()
        assert pca1.noise_variance_ratio() == pca2.noise_variance_ratio()
        assert_almost_equal(pca1.variance(), pca2.variance())
        assert_almost_equal(pca1.variance_ratio(), pca2.variance_ratio())
        assert_array_almost_equal(pca1.whitened_components(),
                                  pca2.whitened_components())
Example #40
    def build(self, images, group=None, label=None, verbose=False):
        # compute reference shape
        reference_shape = self._compute_reference_shape(images, group, label,
                                                        verbose)
        # normalize images
        images = self._normalize_images(images, group, label, reference_shape,
                                        verbose)

        # build models at each scale
        if verbose:
            print_dynamic('- Building models\n')
        shape_models = []
        appearance_models = []
        # for each pyramid level (high --> low)
        for j, s in enumerate(self.scales):
            if verbose:
                if len(self.scales) > 1:
                    level_str = '  - Level {}: '.format(j)
                else:
                    level_str = '  - '

            # obtain image representation
            if j == 0:
                # compute features at highest level
                feature_images = self._compute_features(images, level_str,
                                                        verbose)
                level_images = feature_images
            elif self.scale_features:
                # scale features at other levels
                level_images = self._scale_images(feature_images, s,
                                                  level_str, verbose)
            else:
                # scale images and compute features at other levels
                scaled_images = self._scale_images(images, s, level_str,
                                                   verbose)
                level_images = self._compute_features(scaled_images,
                                                      level_str, verbose)

            # extract potentially rescaled shapes at the highest level
            level_shapes = [i.landmarks[group][label]
                            for i in level_images]

            # obtain shape representation
            if j == 0 or self.scale_shapes:
                # obtain shape model
                if verbose:
                    print_dynamic('{}Building shape model'.format(level_str))
                shape_model = self._build_shape_model(
                    level_shapes, self.max_shape_components)
                # add shape model to the list
                shape_models.append(shape_model)
            else:
                # copy previous shape model and add it to the list
                shape_models.append(deepcopy(shape_model))

            # obtain warped images
            warped_images = self._warp_images(level_images, level_shapes,
                                              shape_model.mean, level_str,
                                              verbose)

            # obtain appearance model
            if verbose:
                print_dynamic('{}Building appearance model'.format(level_str))
            appearance_model = PCAModel(warped_images)
            # trim appearance model if required
            if self.max_appearance_components is not None:
                appearance_model.trim_components(
                    self.max_appearance_components)
            # add appearance model to the list
            appearance_models.append(appearance_model)

            if verbose:
                print_dynamic('{}Done\n'.format(level_str))

        # reverse the list of shape and appearance models so that they are
        # ordered from lower to higher resolution
        shape_models.reverse()
        appearance_models.reverse()
        self.scales.reverse()

        aam = self._build_aam(shape_models, appearance_models, reference_shape)

        return aam
Example #41
File: base.py, Project: jabooth/menpofit
    def _train(self, images, group=None, verbose=False):
        checks.check_landmark_trilist(images[0], self.transform, group=group)
        self.reference_shape = compute_reference_shape(
            [i.landmarks[group] for i in images],
            self.diagonal, verbose=verbose)
        
        # normalize images
        images = rescale_images_to_reference_shape(
            images, group, self.reference_shape, verbose=verbose)
        if self.sigma:
            images = [fsmooth(i, self.sigma) for i in images]

        # Build models at each scale
        if verbose:
            print_dynamic('- Building models\n')

        feature_images = []
        # for each scale (low --> high)
        for j in range(self.n_scales):
            if verbose:
                if len(self.scales) > 1:
                    scale_prefix = '  - Scale {}: '.format(j)
                else:
                    scale_prefix = '  - '
            else:
                scale_prefix = None

            # Handle holistic features
            if j == 0 and self.holistic_features[j] == no_op:
                # Saves a lot of memory
                feature_images = images
            elif j == 0 or self.holistic_features[j] is not self.holistic_features[j - 1]:
                # Compute features only if this is the first pass through
                # the loop or the features at this scale are different from
                # the features at the previous scale
                feature_images = compute_features(images,
                                                  self.holistic_features[j],
                                                  prefix=scale_prefix,
                                                  verbose=verbose)
            # handle scales
            if self.scales[j] != 1:
                # Scale feature images only if scale is different than 1
                scaled_images = scale_images(feature_images, self.scales[j],
                                             prefix=scale_prefix,
                                             verbose=verbose)
            else:
                scaled_images = feature_images

            # Extract potentially rescaled shapes
            scale_shapes = [i.landmarks[group] for i in scaled_images]

            # Build the shape model
            if verbose:
                print_dynamic('{}Building shape model'.format(scale_prefix))

            shape_model = self._build_shape_model(scale_shapes, j)
            self.shape_models.append(shape_model)

            # Obtain warped images - we use a scaled version of the
            # reference shape, computed here. This is because the mean
            # moves when we are incrementing, and we need a consistent
            # reference frame.
            scaled_reference_shape = Scale(self.scales[j], n_dims=2).apply(
                self.reference_shape)
            warped_images = self._warp_images(scaled_images, scale_shapes,
                                              scaled_reference_shape,
                                              j, scale_prefix, verbose)

            # obtain appearance model
            if verbose:
                print_dynamic('{}Building appearance model'.format(
                    scale_prefix))

            appearance_model = PCAModel(warped_images)
            # trim appearance model if required
            if self.max_appearance_components[j] is not None:
                appearance_model.trim_components(
                    self.max_appearance_components[j])
            # add appearance model to the list
            self.appearance_models.append(appearance_model)

            expert_ensemble = self.expert_ensemble_cls[j](
                images=scaled_images, shapes=scale_shapes,
                patch_shape=self.patch_shape[j],
                patch_normalisation=self.patch_normalisation,
                cosine_mask=self.cosine_mask,
                context_shape=self.context_shape[j],
                sample_offsets=self.sample_offsets,
                prefix=scale_prefix, verbose=verbose)
            self.expert_ensembles.append(expert_ensemble)

            if verbose:
                print_dynamic('{}Done\n'.format(scale_prefix))
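
The feature-handling branch above recomputes holistic features only when the feature callable changes between consecutive scales. A minimal self-contained sketch of that identity check, with hypothetical toy "images" and feature functions:

def features_per_scale(images, holistic_features, compute):
    # recompute features only when the feature callable changes
    # between consecutive scales (identity check, as in the code above)
    feature_images, out = None, []
    for j, f in enumerate(holistic_features):
        if j == 0 or f is not holistic_features[j - 1]:
            feature_images = [compute(f, i) for i in images]
        out.append(feature_images)  # reused object when unchanged
    return out

def double(x):
    return 2 * x

def identity(x):
    return x

result = features_per_scale([1, 2], [double, double, identity],
                            lambda f, i: f(i))
assert result[0] is result[1]   # second scale reused the first's features
assert result[2] == [1, 2]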
Example #48
0
def test_pca_n_active_components():
    samples = [PointCloud(np.random.randn(10)) for _ in range(10)]
    model = PCAModel(samples)
    # integer
    model.n_active_components = 5
    assert_equal(model.n_active_components, 5)
Example #49
0
    def _train_batch(self, image_batch, increment=False, group=None,
                     verbose=False, shape_forgetting_factor=1.0,
                     appearance_forgetting_factor=1.0):
        r"""
        Builds an Active Appearance Model from a list of landmarked images.

        Parameters
        ----------
        images : list of :map:`MaskedImage`
            The set of landmarked images from which to build the AAM.
        group : `string`, optional
            The key of the landmark set that should be used. If ``None``,
            and if there is only one set of landmarks, this set will be used.
        verbose : `boolean`, optional
            Flag that controls information and progress printing.

        Returns
        -------
        aam : :map:`AAM`
            The AAM object. Shape and appearance models are stored from
            lowest to highest scale
        """
        # Rescale to existing reference shape
        image_batch = rescale_images_to_reference_shape(
            image_batch, group, self.reference_shape,
            verbose=verbose)

        # build models at each scale
        if verbose:
            print_dynamic('- Building models\n')

        feature_images = []
        # for each scale (low --> high)
        for j in range(self.n_scales):
            if verbose:
                if len(self.scales) > 1:
                    scale_prefix = '  - Scale {}: '.format(j)
                else:
                    scale_prefix = '  - '
            else:
                scale_prefix = None

            # Handle holistic features
            if j == 0 and self.holistic_features[j] == no_op:
                # Saves a lot of memory
                feature_images = image_batch
            elif j == 0 or self.holistic_features[j] is not self.holistic_features[j - 1]:
                # Compute features only if this is the first pass through
                # the loop or the features at this scale are different from
                # the features at the previous scale
                feature_images = compute_features(image_batch,
                                                  self.holistic_features[j],
                                                  prefix=scale_prefix,
                                                  verbose=verbose)
            # handle scales
            if self.scales[j] != 1:
                # Scale feature images only if scale is different than 1
                scaled_images = scale_images(feature_images, self.scales[j],
                                             prefix=scale_prefix,
                                             verbose=verbose)
            else:
                scaled_images = feature_images

            # Extract potentially rescaled shapes
            scale_shapes = [i.landmarks[group].lms for i in scaled_images]

            # Build the shape model
            if verbose:
                print_dynamic('{}Building shape model'.format(scale_prefix))

            if not increment:
                if j == 0:
                    shape_model = self._build_shape_model(
                        scale_shapes, j)
                    self.shape_models.append(shape_model)
                else:
                    self.shape_models.append(deepcopy(shape_model))
            else:
                self._increment_shape_model(
                    scale_shapes, self.shape_models[j],
                    forgetting_factor=shape_forgetting_factor)

            # Obtain warped images - we use a scaled version of the
            # reference shape, computed here. This is because the mean
            # moves when we are incrementing, and we need a consistent
            # reference frame.
            scaled_reference_shape = Scale(self.scales[j], n_dims=2).apply(
                self.reference_shape)
            warped_images = self._warp_images(scaled_images, scale_shapes,
                                              scaled_reference_shape,
                                              j, scale_prefix, verbose)

            # obtain appearance model
            if verbose:
                print_dynamic('{}Building appearance model'.format(
                    scale_prefix))

            if not increment:
                appearance_model = PCAModel(warped_images)
                # trim appearance model if required
                if self.max_appearance_components[j] is not None:
                    appearance_model.trim_components(
                        self.max_appearance_components[j])
                # add appearance model to the list
                self.appearance_models.append(appearance_model)
            else:
                # increment appearance model
                self.appearance_models[j].increment(
                    warped_images,
                    forgetting_factor=appearance_forgetting_factor)
                # trim appearance model if required
                if self.max_appearance_components[j] is not None:
                    self.appearance_models[j].trim_components(
                        self.max_appearance_components[j])

            if verbose:
                print_dynamic('{}Done\n'.format(scale_prefix))

        # Because the shape model is simply copied between scales, we must
        # wait until all scales are built before trimming it. This allows a
        # different number of components to be retained per scale.
        for j, sm in enumerate(self.shape_models):
            max_sc = self.max_shape_components[j]
            if max_sc is not None:
                sm.trim_components(max_sc)
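
Since the per-scale shape models may be deep copies of one another, the deferred trim permits a different max_shape_components per scale. A minimal sketch of the pattern, with a toy stand-in for the PCA model:

from copy import deepcopy

class ToyModel:
    # stand-in exposing the same trim_components interface
    def __init__(self, n_components):
        self.n_components = n_components

    def trim_components(self, n):
        self.n_components = min(self.n_components, n)

shape_models = [ToyModel(20)]
shape_models.append(deepcopy(shape_models[0]))  # copied, as at scale j > 0
max_shape_components = [5, 10]
for sm, max_sc in zip(shape_models, max_shape_components):
    if max_sc is not None:
        sm.trim_components(max_sc)
assert [sm.n_components for sm in shape_models] == [5, 10]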
Example #50
0
def test_pca_n_active_components_negative():
    samples = [PointCloud(np.random.randn(10)) for _ in range(10)]
    model = PCAModel(samples)
    with raises(ValueError):
        # a negative number of active components is invalid
        model.n_active_components = -5
Example #51
0
File: lsfm.py Project: conansherry/menpo3d
def lsfm_model_importer(path, **kwargs):
    m = loadmat(str(path))
    mean = TriMesh(m["mean"].reshape([-1, 3]), trilist=m["trilist"])
    return PCAModel.init_from_components(m["components"].T,
                                         m["eigenvalues"].ravel(), mean,
                                         m["n_training_samples"], True)
Example #52
0
def test_pca_trim_variance_limit():
    samples = [PointCloud(np.random.randn(10)) for _ in range(10)]
    model = PCAModel(samples)
    with raises(ValueError):
        # impossible to keep more than 1.0 ratio variance
        model.trim_components(2.5)
Example #53
0
File: lsfm.py Project: HaoyangWang/menpo3d
def lsfm_model_importer(path, **kwargs):
    m = loadmat(str(path))
    mean = TriMesh(m['mean'].reshape([-1, 3]), trilist=m['trilist'])
    return PCAModel.init_from_components(m['components'].T,
                                         m['eigenvalues'].ravel(),
                                         mean, m['n_training_samples'], True)
Example #54
0
    def build(self, images, group=None, label=None, verbose=False, **kwargs):
        # compute reference shape
        reference_shape = self._compute_reference_shape(images, group, label,
                                                        verbose)
        # normalize images
        images = self._normalize_images(images, group, label, reference_shape,
                                        verbose)

        # build models at each scale
        if verbose:
            print_dynamic('- Building models\n')
        shape_models = []
        appearance_models = []
        classifiers = []
        # for each pyramid level (high --> low)
        for j, s in enumerate(self.scales):
            if verbose:
                if len(self.scales) > 1:
                    level_str = '  - Level {}: '.format(j)
                else:
                    level_str = '  - '

            # obtain image representation
            if j == 0:
                # compute features at highest level
                feature_images = self._compute_features(images, level_str,
                                                        verbose)
                level_images = feature_images
            elif self.scale_features:
                # scale features at other levels
                level_images = self._scale_images(feature_images, s,
                                                  level_str, verbose)
            else:
                # scale images and compute features at other levels
                scaled_images = self._scale_images(images, s, level_str,
                                                   verbose)
                level_images = self._compute_features(scaled_images,
                                                      level_str, verbose)

            # extract potentially rescaled shapes at this level
            level_shapes = [i.landmarks[group][label]
                            for i in level_images]

            # obtain shape representation
            if j == 0 or self.scale_shapes:
                # obtain shape model
                if verbose:
                    print_dynamic('{}Building shape model'.format(level_str))
                shape_model = self._build_shape_model(
                    level_shapes, self.max_shape_components)
                # add shape model to the list
                shape_models.append(shape_model)
            else:
                # copy previous shape model and add it to the list
                shape_models.append(deepcopy(shape_model))

            # obtain warped images
            warped_images = self._warp_images(level_images, level_shapes,
                                              shape_model.mean(), level_str,
                                              verbose)

            # obtain appearance model
            if verbose:
                print_dynamic('{}Building appearance model'.format(level_str))
            appearance_model = PCAModel(warped_images)
            # trim appearance model if required
            if self.max_appearance_components is not None:
                appearance_model.trim_components(
                    self.max_appearance_components)
            # add appearance model to the list
            appearance_models.append(appearance_model)

            if isinstance(self, GlobalUnifiedBuilder):
                # obtain parts images
                parts_images = self._parts_images(level_images, level_shapes,
                                                  level_str, verbose)
            else:
                # parts images are warped images
                parts_images = warped_images

            # build desired responses
            mvn = multivariate_normal(mean=np.zeros(2), cov=self.covariance)
            grid = build_sampling_grid(self.parts_shape)
            Y = [mvn.pdf(grid + offset) for offset in self.offsets]

            # build classifiers
            n_landmarks = level_shapes[0].n_points
            level_classifiers = []
            for l in range(n_landmarks):
                if verbose:
                    print_dynamic('{}Building classifiers - {}'.format(
                        level_str,
                        progress_bar_str((l + 1.) / n_landmarks,
                                         show_bar=False)))

                X = [i.pixels[l] for i in parts_images]

                clf = self.classifier(X, Y, **kwargs)
                level_classifiers.append(clf)

            # build multiple classifier from the level classifiers
            if self.classifier is MCF:
                multiple_clf = MultipleMCF(level_classifiers)
            elif self.classifier is LinearSVMLR:
                multiple_clf = MultipleLinearSVMLR(level_classifiers)

            # add multiple classifier to the list
            classifiers.append(multiple_clf)

            if verbose:
                print_dynamic('{}Done\n'.format(level_str))

        # reverse the list of shape and appearance models so that they are
        # ordered from lower to higher resolution
        shape_models.reverse()
        appearance_models.reverse()
        classifiers.reverse()
        self.scales.reverse()

        unified = self._build_unified(shape_models, appearance_models,
                                      classifiers, reference_shape)

        return unified
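
The desired responses above are Gaussian probability maps evaluated over a patch-sized sampling grid. A self-contained sketch of that construction, with a simple stand-in for build_sampling_grid and a single zero offset:

import numpy as np
from scipy.stats import multivariate_normal

def sampling_grid(patch_shape):
    # (h, w, 2) integer offsets centred on the patch middle;
    # a stand-in for build_sampling_grid
    h, w = patch_shape
    ys, xs = np.meshgrid(np.arange(h) - h // 2,
                         np.arange(w) - w // 2, indexing='ij')
    return np.stack([ys, xs], axis=-1)

mvn = multivariate_normal(mean=np.zeros(2), cov=3)
grid = sampling_grid((17, 17))
Y = [mvn.pdf(grid + offset) for offset in np.array([[0, 0]])]
assert Y[0].shape == (17, 17)
assert Y[0].argmax() == (17 * 17) // 2  # response peaks at the patch centre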
Example #55
0
File: base.py Project: ikassi/menpo
def aam_builder(images, group=None, label='all', interpolator='scipy',
                diagonal_range=None, boundary=3,
                transform_cls=PiecewiseAffineTransform,
                trilist=None, patch_size=None, n_levels=3, downscale=2,
                scaled_reference_frames=False, feature_type=None,
                max_shape_components=None, max_appearance_components=None):

    r"""
    Builds an AAM object from a set of landmarked images.

    Parameters
    ----------
    images: list of :class:`menpo.image.Image`
        The set of landmarked images from which to build the AAM.

    group : string, Optional
        The key of the landmark set that should be used. If None,
        and if there is only one set of landmarks, this set will be used.

        Default: None

    label: string, Optional
        The label of the landmark manager that you wish to use. If no
        label is passed, the convex hull of all landmarks is used.

        Default: 'all'

    interpolator : string, Optional
        The interpolator that should be used to perform the warps.

        Default: 'scipy'

    diagonal_range: int, Optional
        All images will be rescaled to ensure that the scale of their
        landmarks matches the scale of the mean shape.

        If int, ensures that the mean shape is scaled so that
        the diagonal of the bounding box containing it matches the
        diagonal_range value.
        If None, the mean landmarks are not rescaled.

        Note that, because the reference frame is computed from the mean
        landmarks, this kwarg also specifies the diagonal length of the
        reference frame (provided that features computation does not change
        the image size).

        Default: None

    boundary: int, Optional
        The number of pixels to be left as a safe margin on the boundaries
        of the reference frame (has potential effects on the gradient
        computation).

        Default: 3

    transform_cls: :class:`menpo.transform.PureAlignmentTransform`, Optional
        The :class:`menpo.transform.PureAlignmentTransform` that will be
        used to warp the images.

        Default: :class:`menpo.transform.PiecewiseAffineTransform`

    trilist: (t, 3) ndarray, Optional
        Triangle list that will be used to build the reference frame. If None,
        defaults to performing Delaunay triangulation on the points.

        Default: None

        .. note::

            This kwarg will be completely ignored if the kwarg transform_cls
            is not set :class:`menpo.transform.PiecewiseAffineTransform` or
            if the kwarg patch_size is not set to None.

    patch_size: tuple of ints or None, Optional
        If tuple, the appearance model of the AAM will be obtained by
        sampling the appearance patches around the landmarks. If None, the
        standard representation for the AAMs' appearance model will be used
        instead.

        Default: None

        .. note::

            If tuple, the kwarg transform_cls will be automatically set to
            :class:`menpo.transform.TPS`.

    n_levels: int, Optional
        The number of multi-resolution pyramidal levels to be used.

        Default: 3

    downscale: float > 1, Optional
        The downscale factor that will be used to create the different AAM
        pyramidal levels.

        Default: 2

    scaled_reference_frames: boolean, Optional
        If False, the resolution of all reference frames used to build the
        appearance model will be fixed (the original images will be
        both smoothed and scaled using a Gaussian pyramid). Consequently, all
        appearance models will have the same dimensionality.
        If True, the reference frames used to create the appearance model
        will be themselves scaled (the original images will only be smoothed).
        Consequently, the dimensionality of all appearance models will be
        different.

        Default: False

    feature_type: string or closure, Optional
        If None, the appearance model will be build using the original image
        representation, i.e. no features will be extracted from the original
        images.
        If string or closure, the appearance model will be built from a
        feature representation of the original images:
            If string, the `aam_builder` will try to compute image features by
            executing:

               feature_image = eval('img.feature_type.' + feature_type + '()')

            For this to work properly the feature_type needs to be one of
            Menpo's standard image feature methods. Note that, in this case,
            the feature computation will be carried out using the respective
            default options.

            Non-default feature options and new experimental features can be
            used by defining a closure. In this case, the closure must define a
            function that receives an image as input and returns a
            particular feature representation of that image. For example:

                def igo_double_from_std_normalized_intensities(image):
                    image = deepcopy(image)
                    image.normalize_std_inplace()
                    return image.feature_type.igo(double_angles=True)

            See `menpo.image.MaskedNDImage` for more details on Menpo's
            standard image features and feature options.

        Default: None

    max_shape_components: 0 < int < n_components, Optional
        If int, it specifies the specific number of components of the
        original shape model to be retained.

        Default: None

    max_appearance_components: 0 < int < n_components, Optional
        If int, it specifies the specific number of components of the
        original appearance model to be retained.

        Default: None

    Returns
    -------
    aam : :class:`menpo.aam.AAM`
        The AAM object
    """

    if patch_size is not None:
        transform_cls = TPS

    print('- Rescaling images')
    shapes = [i.landmarks[group][label].lms for i in images]
    reference_shape = mean_pointcloud(shapes)
    if diagonal_range:
        x, y = reference_shape.range()
        scale = diagonal_range / np.sqrt(x**2 + y**2)
        Scale(scale, reference_shape.n_dims).apply_inplace(reference_shape)
    images = [i.rescale_to_reference_shape(reference_shape, group=group,
                                           label=label,
                                           interpolator=interpolator)
              for i in images]

    if scaled_reference_frames:
        print('- Setting gaussian smoothing generators')
        generator = [i.smoothing_pyramid(n_levels=n_levels,
                                         downscale=downscale)
                     for i in images]
    else:
        print('- Setting gaussian pyramid generators')
        generator = [i.gaussian_pyramid(n_levels=n_levels,
                                        downscale=downscale)
                     for i in images]

    print('- Building model pyramids')
    shape_models = []
    appearance_models = []
    # for each level
    for j in np.arange(n_levels):
        print(' - Level {}'.format(j))

        print('  - Computing feature_type')
        images = [compute_features(next(g), feature_type) for g in generator]
        # extract potentially rescaled shapes
        shapes = [i.landmarks[group][label].lms for i in images]

        if scaled_reference_frames or j == 0:
            print('  - Building shape model')
            if j != 0:
                shapes = [Scale(1/downscale, n_dims=shapes[0].n_dims).apply(s)
                          for s in shapes]
            # centralize shapes
            centered_shapes = [Translation(-s.centre).apply(s) for s in shapes]
            # align centralized shape using Procrustes Analysis
            gpa = GeneralizedProcrustesAnalysis(centered_shapes)
            aligned_shapes = [s.aligned_source for s in gpa.transforms]

            # build shape model
            shape_model = PCAModel(aligned_shapes)
            if max_shape_components is not None:
                # trim shape model if required
                shape_model.trim_components(max_shape_components)

            print('  - Building reference frame')
            mean_shape = mean_pointcloud(aligned_shapes)
            if patch_size is not None:
                # build patch based reference frame
                reference_frame = build_patch_reference_frame(
                    mean_shape, boundary=boundary, patch_size=patch_size)
            else:
                # build reference frame
                reference_frame = build_reference_frame(
                    mean_shape, boundary=boundary, trilist=trilist)

        # add shape model to the list
        shape_models.append(shape_model)

        print('  - Computing transforms')
        transforms = [transform_cls(reference_frame.landmarks['source'].lms,
                                    i.landmarks[group][label].lms)
                      for i in images]

        print('  - Warping images')
        images = [i.warp_to(reference_frame.mask, t,
                            interpolator=interpolator)
                  for i, t in zip(images, transforms)]

        for i in images:
            i.landmarks['source'] = reference_frame.landmarks['source']
        if patch_size:
            for i in images:
                i.build_mask_around_landmarks(patch_size, group='source')
        else:
            for i in images:
                i.constrain_mask_to_landmarks(group='source', trilist=trilist)

        print('  - Building appearance model')
        appearance_model = PCAModel(images)
        # trim appearance model if required
        if max_appearance_components is not None:
            appearance_model.trim_components(max_appearance_components)

        # add appearance model to the list
        appearance_models.append(appearance_model)

    # reverse the list of shape and appearance models so that they are
    # ordered from lower to higher resolution
    shape_models.reverse()
    appearance_models.reverse()

    return AAM(shape_models, appearance_models, transform_cls, feature_type,
               reference_shape, downscale, patch_size, interpolator)
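
The closure form of feature_type described in the docstring can be written out as runnable code; the method names below follow the docstring's own example and are assumptions about that menpo version's API:

from copy import deepcopy

def igo_double_from_std_normalized_intensities(image):
    # as sketched in the docstring: normalize intensities, then IGO features
    image = deepcopy(image)
    image.normalize_std_inplace()
    return image.feature_type.igo(double_angles=True)

# hypothetical call, given a list of landmarked images:
# aam = aam_builder(images,
#                   feature_type=igo_double_from_std_normalized_intensities)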
Example #56
0
    def build(self, images, group=None, label=None, verbose=False):
        r"""
        Builds a Multilevel Active Appearance Model from a list of
        landmarked images.

        Parameters
        ----------
        images : list of :map:`MaskedImage`
            The set of landmarked images from which to build the AAM.

        group : `string`, optional
            The key of the landmark set that should be used. If ``None``,
            and if there is only one set of landmarks, this set will be used.

        label : `string`, optional
            The label of the landmark manager that you wish to use. If no
            label is passed, the convex hull of all landmarks is used.

        verbose : `boolean`, optional
            Flag that controls information and progress printing.

        Returns
        -------
        aam : :map:`AAM`
            The AAM object. Shape and appearance models are stored from lowest
            to highest level
        """
        # compute reference_shape and normalize images size
        self.reference_shape, normalized_images = \
            normalization_wrt_reference_shape(images, group, label,
                                              self.normalization_diagonal,
                                              verbose=verbose)

        # create pyramid
        generators = create_pyramid(normalized_images, self.n_levels,
                                    self.downscale, self.features,
                                    verbose=verbose)

        # build the model at each pyramid level
        if verbose:
            if self.n_levels > 1:
                print_dynamic('- Building model for each of the {} pyramid '
                              'levels\n'.format(self.n_levels))
            else:
                print_dynamic('- Building model\n')

        shape_models = []
        appearance_models = []
        # for each pyramid level (high --> low)
        for j in range(self.n_levels):
            # since models are built from highest to lowest level, the
            # parameters in form of list need to use a reversed index
            rj = self.n_levels - j - 1

            if verbose:
                level_str = '  - '
                if self.n_levels > 1:
                    level_str = '  - Level {}: '.format(j + 1)

            # get feature images of current level
            feature_images = []
            for c, g in enumerate(generators):
                if verbose:
                    print_dynamic(
                        '{}Computing feature space/rescaling - {}'.format(
                        level_str,
                        progress_bar_str((c + 1.) / len(generators),
                                         show_bar=False)))
                feature_images.append(next(g))

            # extract potentially rescaled shapes
            shapes = [i.landmarks[group][label] for i in feature_images]

            # define shapes that will be used for training
            if j == 0:
                original_shapes = shapes
                train_shapes = shapes
            else:
                if self.scaled_shape_models:
                    train_shapes = shapes
                else:
                    train_shapes = original_shapes

            # train shape model and find reference frame
            if verbose:
                print_dynamic('{}Building shape model'.format(level_str))
            shape_model = build_shape_model(
                train_shapes, self.max_shape_components[rj])
            reference_frame = self._build_reference_frame(shape_model.mean())

            # add shape model to the list
            shape_models.append(shape_model)

            # compute transforms
            if verbose:
                print_dynamic('{}Computing transforms'.format(level_str))

            # Create a dummy initial transform
            s_to_t_transform = self.transform(
                reference_frame.landmarks['source'].lms,
                reference_frame.landmarks['source'].lms)

            # warp images to reference frame
            warped_images = []
            for c, i in enumerate(feature_images):
                if verbose:
                    print_dynamic('{}Warping images - {}'.format(
                        level_str,
                        progress_bar_str(float(c + 1) / len(feature_images),
                                         show_bar=False)))
                # Setting the target can be significantly faster for transforms
                # such as CachedPiecewiseAffine
                s_to_t_transform.set_target(i.landmarks[group][label])
                warped_images.append(i.warp_to_mask(reference_frame.mask,
                                                    s_to_t_transform))

            # attach reference_frame to images' source shape
            for i in warped_images:
                i.landmarks['source'] = reference_frame.landmarks['source']

            # build appearance model
            if verbose:
                print_dynamic('{}Building appearance model'.format(level_str))
            appearance_model = PCAModel(warped_images)
            # trim appearance model if required
            if self.max_appearance_components[rj] is not None:
                appearance_model.trim_components(
                    self.max_appearance_components[rj])

            # add appearance model to the list
            appearance_models.append(appearance_model)

            if verbose:
                print_dynamic('{}Done\n'.format(level_str))

        # reverse the list of shape and appearance models so that they are
        # ordered from lower to higher resolution
        shape_models.reverse()
        appearance_models.reverse()
        n_training_images = len(images)

        return self._build_aam(shape_models, appearance_models,
                               n_training_images)
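
Reusing one transform via set_target, as in the warping loop above, avoids constructing a fresh transform per image. A minimal sketch of the pattern with a stand-in transform class:

class ToyTransform:
    # stand-in illustrating the set_target reuse pattern
    def __init__(self, source, target):
        self.source, self.target = source, target
        self.n_constructions = 1

    def set_target(self, target):  # cheap retargeting
        self.target = target

source = 'reference_frame_landmarks'
transform = ToyTransform(source, source)  # dummy initial transform
for target in ['shape_a', 'shape_b', 'shape_c']:
    transform.set_target(target)
    # ... warp the corresponding image with `transform` here ...
assert transform.n_constructions == 1  # constructed exactly once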
Example #57
0
def test_pca_trim_negative_integers():
    samples = [PointCloud(np.random.randn(10)) for _ in range(10)]
    model = PCAModel(samples)
    with raises(ValueError):
        # no negative number of components
        model.trim_components(-2)