def _attach_predictions(template_name, pixels, coordinates, heatmaps,
                        labeller_fn, show_input_images, show_combined_heatmap,
                        show_individual_heatmaps, index_to_label_fn):
    input_image = Image.init_from_channels_at_back(pixels)
    input_image.landmarks['predictions'] = PointCloud(coordinates)
    labeller(input_image, 'predictions', labeller_fn)
    del input_image.landmarks['predictions']
    images = []

    if show_input_images:
        images.append(input_image)

    if show_combined_heatmap:
        combined_heatmap = np.sum(heatmaps, axis=-1) * 255.0
        combined_heatmap = Image(combined_heatmap)
        combined_heatmap.landmarks[template_name] = input_image.landmarks[
            template_name]
        images.append(combined_heatmap)

    if show_individual_heatmaps:
        for i in range(heatmaps.shape[-1]):
            heatmap = heatmaps[..., i] * 255.0
            heatmap = Image(heatmap)

            if index_to_label_fn is not None:
                label = index_to_label_fn(i)
                heatmap.landmarks[label] = PointCloud([coordinates[i]])

            images.append(heatmap)

    return images

def _visualise_predictions(input_image, heatmaps, coordinates):
    group_sizes = [17, 17]
    group_labels = ['Endocardium', 'Epicardium']

    plt.figure()

    # input image
    menpo_image = Image.init_from_channels_at_back(input_image)
    menpo_image.landmarks['predictions'] = PointCloud(coordinates)
    labeller(menpo_image, 'predictions', left_ventricle_34)
    del menpo_image.landmarks['predictions']
    rasterised_image = menpo_image.rasterize_landmarks(group='lv_34')

    # the grid needs at least 24 columns: 17 heatmap columns start at column 7
    ax_input_image = plt.subplot2grid((7, 24), (0, 0), colspan=6, rowspan=6)
    ax_input_image.imshow(rasterised_image.pixels_with_channels_at_back())

    index = 0
    heatmap_plots = []

    # plot individual predictions
    for i in range(len(group_sizes)):
        for j in range(group_sizes[i]):
            axis = plt.subplot2grid((7, 24), (i, 7 + j))
            axis.imshow(heatmaps[..., index])
            index += 1

            heatmap_plots.append(axis)

    add_group_labels(heatmap_plots, group_labels, group_sizes)
    make_ticklabels_invisible(heatmap_plots)

    plt.show()
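
Both helpers above rely on the same menpo idiom: attach the raw predicted coordinates as a temporary 'predictions' landmark group, run a labelling function over it to produce the semantically named group (here left_ventricle_34, which yields 'lv_34'), then delete the scratch group. A minimal sketch of that idiom on a builtin asset, using the stock face_ibug_68_to_face_ibug_68 labelling function as a stand-in and assuming the older menpo API (with the .lms accessor) used throughout these snippets:

import menpo.io as mio
from menpo.landmark import labeller, face_ibug_68_to_face_ibug_68
from menpo.shape import PointCloud

image = mio.import_builtin_asset('breakingbad.jpg')
# copy the raw 68 points into a scratch group...
image.landmarks['predictions'] = PointCloud(image.landmarks['PTS'].lms.points)
# ...derive the semantically labelled 'face_ibug_68' group from it...
labeller(image, 'predictions', face_ibug_68_to_face_ibug_68)
# ...and drop the scratch group
del image.landmarks['predictions']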
Example #3
def train_aic_rlms(trainset, output, n_train_imgs=None):
    training_images = []
    # load landmarked images
    for i in mio.import_images(Path(trainset) / '*', verbose=True, max_images=n_train_imgs):
        # crop image
        i = i.crop_to_landmarks_proportion(0.5)
        labeller(i, 'PTS', face_ibug_68_to_face_ibug_66_trimesh)
        # convert it to greyscale if needed
        if i.n_channels == 3:
            i = i.as_greyscale(mode='average')
        # append it to the list
        training_images.append(i)

    # a single (0, 0) offset; widen the ranges to sample neighbouring pixels
    offsets = np.meshgrid(range(0, 1), range(0, 1))
    offsets = np.asarray([offsets[0].flatten(), offsets[1].flatten()]).T

    # silence divide-by-zero / invalid-value warnings raised during training
    np.seterr(divide='ignore', invalid='ignore')

    # note: the component caps below assume n_train_imgs was passed explicitly
    unified = UnifiedAAMCLM(training_images,
                            parts_shape=(17, 17),
                            offsets=offsets,
                            group=test_group,
                            holistic_features=fast_dsift,
                            diagonal=100,
                            scales=(1, .5),
                            max_appearance_components=min(50, int(n_train_imgs / 2)),
                            verbose=True)

    n_appearance = [min(25, int(n_train_imgs / 2)), min(50, int(n_train_imgs / 2))]
    fitter = UnifiedAAMCLMFitter(unified, algorithm_cls=AICRLMS, n_shape=[3, 12], n_appearance=n_appearance)
    return fitter
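
The returned fitter is assumed to follow the standard menpofit fitter interface (fit_from_bb / fit_from_shape); older releases of the unified AAM-CLM project may expose fitter.fit(...) instead. A rough usage sketch under that assumption, initialising from the ground-truth bounding box of a builtin asset:

import menpo.io as mio

test_image = mio.import_builtin_asset('breakingbad.jpg')
test_image = test_image.crop_to_landmarks_proportion(0.5)
if test_image.n_channels == 3:
    test_image = test_image.as_greyscale(mode='average')
# initialise from the ground-truth bounding box and fit
bb = test_image.landmarks['PTS'].lms.bounding_box()
result = fitter.fit_from_bb(test_image, bb)
print(result.final_shape)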
Example #4
def process(image, crop_proportion=0.2, max_diagonal=400):
    if image.n_channels == 3:
        image = image.as_greyscale()
    image = image.crop_to_landmarks_proportion(crop_proportion)
    d = image.diagonal()
    if d > max_diagonal:
        image = image.rescale(float(max_diagonal) / d)
    labeller(image, 'PTS', face_ibug_68_to_face_ibug_68_trimesh)
    return image
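
A possible way to drive process() over a whole directory of landmarked images (the path is illustrative, not part of the original snippet):

import menpo.io as mio

training_images = [process(img)
                   for img in mio.import_images('/path/to/trainset/', verbose=True)]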
Example #5
def load_test_data(testset, n_test_imgs=None):
    test_images = []
    for i in mio.import_images(Path(testset), verbose=True, max_images=n_test_imgs):    
        i = i.crop_to_landmarks_proportion(0.5)
        labeller(i, 'PTS', face_ibug_68_to_face_ibug_66_trimesh)
        if i.n_channels == 3:
            i = i.as_greyscale(mode='average')
        test_images.append(i)

    return test_images

def AAMModel(path, max_shape=None, max_appearance=None):
    training_images = []
    for img in print_progress(mio.import_images(path, verbose=True)):
        labeller(img, 'PTS', face_ibug_68_to_face_ibug_68_trimesh)
        training_images.append(img)
    aam_model = HolisticAAM(training_images,
                            group='face_ibug_68_trimesh',
                            scales=(0.5, 1.0),
                            holistic_features=fast_dsift,
                            verbose=True,
                            max_shape_components=max_shape,
                            max_appearance_components=max_appearance)
    return aam_model, training_images
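
A hedged sketch of turning the returned model into a fitter with menpofit's LucasKanadeAAMFitter (the path and component counts are illustrative):

from menpofit.aam import LucasKanadeAAMFitter

aam, training_images = AAMModel('/path/to/training/images/')
fitter = LucasKanadeAAMFitter(aam, n_shape=[5, 20], n_appearance=[30, 150])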
Example #7
def test_squared_even_patches_sample_offsets():
    image = mio.import_builtin_asset('breakingbad.jpg')
    image = labeller(image, 'PTS', ibug_face_68)
    sample_offsets = np.array([[0, 0], [1, 0]])
    patches = image.extract_patches(image.landmarks['PTS'].lms,
                                    sample_offsets=sample_offsets)
    assert_equals(len(patches), 136)

def test_squared_even_patches_sample_offsets():
    image = mio.import_builtin_asset('breakingbad.jpg')
    image = labeller(image, 'PTS', ibug_face_68)
    sample_offsets = PointCloud([[0, 0], [1, 0]])
    patches = image.extract_patches(image.landmarks['PTS'].lms,
                                    sample_offsets=sample_offsets)
    assert_equals(len(patches), 136)

def test_squared_even_patches_single_array():
    image = mio.import_builtin_asset('breakingbad.jpg')
    image = labeller(image, 'PTS', ibug_face_68)
    patch_shape = (16, 16)
    patches = image.extract_patches(image.landmarks['PTS'].lms,
                                    as_single_array=True,
                                    patch_size=patch_shape)
    assert_equals(patches.shape, ((68, 1, 3) + patch_shape))
Example #10
 def loadImages(self, path_to_training_images):
     training_images = []
     for img in print_progress(mio.import_images(path_to_training_images, verbose=True)):
         # convert to greyscale
         if img.n_channels == 3:
             img = img.as_greyscale()
         # crop to landmarks bounding box with an extra 20% padding
         img = img.crop_to_landmarks_proportion(0.2)
         # rescale image if its diagonal is bigger than 400 pixels
         d = img.diagonal()
         if d > 400:
             img = img.rescale(400.0 / d)
         # define a TriMesh which will be useful for Piecewise Affine Warp of HolisticAAM
         labeller(img, 'PTS', face_ibug_68_to_face_ibug_68_trimesh)
         # append to list
         training_images.append(img)
     return training_images
Example #11
def test_squared_even_patches_single_array():
    image = mio.import_builtin_asset('breakingbad.jpg')
    image = labeller(image, 'PTS', ibug_face_68)
    patch_shape = (16, 16)
    patches = image.extract_patches(image.landmarks['PTS'].lms,
                                    as_single_array=True,
                                    patch_size=patch_shape)
    assert_equals(patches.shape, ((68, 1, 3) + patch_shape))

def test_squared_even_patches_landmarks_label():
    image = mio.import_builtin_asset('breakingbad.jpg')
    image = labeller(image, 'PTS', ibug_face_68)
    patch_shape = (16, 16)
    patches = image.extract_patches_around_landmarks('ibug_face_68',
                                                     label='nose',
                                                     patch_size=patch_shape)
    assert_equals(len(patches), 9)
Example #13
def test_squared_even_patches_landmarks_label():
    image = mio.import_builtin_asset('breakingbad.jpg')
    image = labeller(image, 'PTS', ibug_face_68)
    patch_shape = (16, 16)
    patches = image.extract_patches_around_landmarks('ibug_face_68',
                                                     label='nose',
                                                     patch_size=patch_shape)
    assert_equals(len(patches), 9)
Example #14
def load_database(path_to_images,
                  save_path,
                  db_name,
                  crop_percentage,
                  fast,
                  group,
                  verbose=False):
    # create filename
    if group is not None:
        filename = (db_name + '_' + group.__name__ + '_crop' +
                    str(int(crop_percentage * 100)))
    else:
        filename = db_name + 'PTS' + '_crop' + str(int(crop_percentage * 100))
    if fast:
        filename += '_menpofast.pickle'
    else:
        filename += '_menpo.pickle'
    save_path = os.path.join(save_path, filename)

    # check if file exists
    if file_exists(save_path):
        if verbose:
            print_dynamic('Loading images...')
        images = pickle_load(save_path)
        if verbose:
            print_dynamic('Images Loaded.')
    else:
        # load images
        images = []
        for i in mio.import_images(path_to_images, verbose=verbose):
            if fast:
                i = convert_from_menpo(i)
            i.crop_to_landmarks_proportion_inplace(crop_percentage,
                                                   group='PTS')
            if group is not None:
                labeller(i, 'PTS', group)
            if i.n_channels == 3:
                i = i.as_greyscale(mode='average')
            images.append(i)

        # save images
        pickle_dump(images, save_path)

    # return images
    return images
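
A hedged usage sketch of the loader above (every argument value is illustrative):

images = load_database('/path/to/lfpw/trainset/',
                       save_path='/path/to/cache/',
                       db_name='lfpw_train',
                       crop_percentage=0.5,
                       fast=False,
                       group=None,
                       verbose=True)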
Example #15
def load_database(path_to_images, save_path, db_name, crop_percentage,
                  fast, group, verbose=False):
    # create filename
    if group is not None:
        filename = (db_name + '_' + group.__name__ + '_crop' +
                    str(int(crop_percentage * 100)))
    else:
        filename = db_name + 'PTS' + '_crop' + str(int(crop_percentage * 100))
    if fast:
        filename += '_menpofast.pickle'
    else:
        filename += '_menpo.pickle'
    save_path = os.path.join(save_path, filename)

    # check if file exists
    if file_exists(save_path):
        if verbose:
            print_dynamic('Loading images...')
        images = pickle_load(save_path)
        if verbose:
            print_dynamic('Images Loaded.')
    else:
        # load images
        images = []
        for i in mio.import_images(path_to_images, verbose=verbose):
            if fast:
                i = convert_from_menpo(i)
            i.crop_to_landmarks_proportion_inplace(crop_percentage, group='PTS')
            if group is not None:
                labeller(i, 'PTS', group)
            if i.n_channels == 3:
                i = i.as_greyscale(mode='average')
            images.append(i)

        # save images
        pickle_dump(images, save_path)

    # return images
    return images
Example #16
                  [247.96232402, 264.81933552], [265.63001359, 240.76240374],
                  [273.7321494, 219.23983464], [275.94833733, 203.23538213],
                  [276.43082796, 187.79108987], [271.33176225, 170.34611298],
                  [258.50633904, 164.92193012], [256.68032211, 190.02252505],
                  [253.34318274, 202.62322841], [250.64136836, 216.39925191],
                  [248.92192186, 254.44710508], [257.03785057, 217.075461],
                  [260.28050441, 202.86155077], [261.39108462,
                                                 187.78257369]])))

# load images
filenames = ['breakingbad.jpg', 'takeo.ppm', 'lenna.png', 'einstein.jpg']
training_images = []
for i in range(4):
    im = mio.import_builtin_asset(filenames[i])
    im.crop_to_landmarks_proportion_inplace(0.1)
    labeller(im, 'PTS', ibug_68_trimesh)
    if im.n_channels == 3:
        im = im.as_greyscale(mode='luminosity')
    training_images.append(im)

# Seed the random number generator
np.random.seed(seed=1000)

# build sdms
sdm1 = SDMTrainer(regression_type=mlr_svd,
                  regression_features=sparse_hog,
                  patch_shape=(16, 16),
                  feature_type=None,
                  normalization_diagonal=150,
                  n_levels=2,
                  downscale=1.3,
Example #17
File: base.py  Project: dubzzz/menpo
def aam_build_benchmark(training_images, training_options=None, verbose=False):
    r"""
    Builds an AAM model.

    Parameters
    ----------
    training_images: list of :class:MaskedImage objects
        A list of the training images.
    training_options: dictionary, optional
        A dictionary with the parameters that will be passed in the AAMBuilder
        (:class:menpo.fitmultilevel.aam.AAMBuilder).
        If None, the default options will be used.
        This is an example of the dictionary with the default options:
            training_options = {'group': 'PTS',
                                'feature_type': 'igo',
                                'transform': PiecewiseAffine,
                                'trilist': None,
                                'normalization_diagonal': None,
                                'n_levels': 3,
                                'downscale': 2,
                                'scaled_shape_models': True,
                                'pyramid_on_features': True,
                                'max_shape_components': None,
                                'max_appearance_components': None,
                                'boundary': 3,
                                'interpolator': 'scipy'
                                }
        For an explanation of the options, please refer to the AAMBuilder
        documentation.

        Default: None
    verbose: boolean, optional
        If True, it prints information regarding the AAM training.

        Default: False

    Returns
    -------
    aam: :class:menpo.fitmultilevel.aam.AAM object
        The trained AAM model.
    """
    if verbose:
        print('AAM Training:')

    # parse options
    if training_options is None:
        training_options = {}

    # group option
    group = training_options.pop('group', None)

    # trilist option
    trilist = training_options.pop('trilist', None)
    if trilist is not None:
        labeller(training_images[0], 'PTS', trilist)
        training_options['trilist'] = \
            training_images[0].landmarks[trilist.__name__].lms.trilist

    # build aam
    aam = AAMBuilder(**training_options).build(training_images, group=group,
                                               verbose=verbose)

    return aam

from pathlib import Path

import menpo.io as mio
from menpo.landmark import labeller, face_ibug_68_to_face_ibug_68_trimesh
from menpo.visualize import print_progress

path_to_images = '/home/ice/Documents/Micro-Expression/SAMM/006/006_1_2/'
training_images = []

path_to_lfpw = Path(path_to_images)

training_shapes = []
for lg in print_progress(
        mio.import_landmark_files(path_to_lfpw / '*.pts', verbose=True)):
    training_shapes.append(lg['all'])

for img in print_progress(mio.import_images(path_to_images, verbose=True)):
    # convert to greyscale
    if img.n_channels == 3:
        img = img.as_greyscale()
    # crop to landmarks bounding box with an extra 20% padding
    img = img.crop_to_landmarks_proportion(0.2)
    # rescale image if its diagonal is bigger than 400 pixels
    d = img.diagonal()
    if d > 400:
        img = img.rescale(400.0 / d)
    # define a TriMesh which will be useful for Piecewise Affine Warp of HolisticAAM
    labeller(img, 'PTS', face_ibug_68_to_face_ibug_68_trimesh)
    # append to list
    training_images.append(img)
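
With the images collected above, a HolisticAAM could be trained on the 'face_ibug_68_trimesh' group, much like the AAMModel helper earlier on this page; a sketch, where the feature and scale choices mirror that helper rather than this snippet:

from menpo.feature import fast_dsift
from menpofit.aam import HolisticAAM

aam = HolisticAAM(training_images,
                  group='face_ibug_68_trimesh',
                  holistic_features=fast_dsift,
                  scales=(0.5, 1.0),
                  verbose=True)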
Example #19
 def load_image(i):
     i = i.crop_to_landmarks_proportion(0.5)
     if i.n_channels == 3:
         i = i.as_greyscale()
     labeller(i, 'PTS', face_ibug_68_to_face_ibug_68)
     return i
Example #20
File: sdm_test.py  Project: kod3r/menpo
                [248.92192186, 254.44710508],
                [257.03785057, 217.075461],
                [260.28050441, 202.86155077],
                [261.39108462, 187.78257369],
            ]
        )
    )
)

# load images
filenames = ["breakingbad.jpg", "takeo.ppm", "lenna.png", "einstein.jpg"]
training_images = []
for i in range(4):
    im = mio.import_builtin_asset(filenames[i])
    im.crop_to_landmarks_proportion_inplace(0.1)
    labeller(im, "PTS", ibug_68_trimesh)
    if im.n_channels == 3:
        im = im.as_greyscale(mode="luminosity")
    training_images.append(im)

# Seed the random number generator
np.random.seed(seed=1000)

# build sdms
sdm1 = SDMTrainer(
    regression_type=mlr_svd,
    regression_features=sparse_hog,
    patch_shape=(16, 16),
    feature_type=None,
    normalization_diagonal=150,
    n_levels=2,
Example #21
def aam_build_benchmark(training_images, training_options=None, verbose=False):
    r"""
    Builds an AAM model.

    Parameters
    ----------
    training_images: list of :class:MaskedImage objects
        A list of the training images.
    training_options: dictionary, optional
        A dictionary with the parameters that will be passed in the AAMBuilder
        (:class:menpo.fitmultilevel.aam.AAMBuilder).
        If None, the default options will be used.
        This is an example of the dictionary with the default options:
            training_options = {'group': 'PTS',
                                'features': 'igo',
                                'transform': PiecewiseAffine,
                                'trilist': None,
                                'normalization_diagonal': None,
                                'n_levels': 3,
                                'downscale': 2,
                                'scaled_shape_models': True,
                                'max_shape_components': None,
                                'max_appearance_components': None,
                                'boundary': 3
                                }
        For an explanation of the options, please refer to the AAMBuilder
        documentation.

        Default: None
    verbose: boolean, optional
        If True, it prints information regarding the AAM training.

        Default: False

    Returns
    -------
    aam: :class:menpo.fitmultilevel.aam.AAM object
        The trained AAM model.
    """
    if verbose:
        print('AAM Training:')

    # parse options
    if training_options is None:
        training_options = {}

    # group option
    group = training_options.pop('group', None)

    # trilist option
    trilist = training_options.pop('trilist', None)
    if trilist is not None:
        labeller(training_images[0], 'PTS', trilist)
        training_options['trilist'] = \
            training_images[0].landmarks[trilist.__name__].lms.trilist

    # build aam
    aam = AAMBuilder(**training_options).build(training_images,
                                               group=group,
                                               verbose=verbose)

    return aam
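
A hedged sketch of calling the benchmark builder with a partial options dictionary (values are illustrative; training_images is assumed to be a list of landmarked menpo images, and any omitted key falls back to the defaults listed in the docstring):

training_options = {'group': 'PTS',
                    'n_levels': 2,
                    'downscale': 2,
                    'max_shape_components': 25,
                    'max_appearance_components': 100}
aam = aam_build_benchmark(training_images,
                          training_options=training_options,
                          verbose=True)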
Example #22
from nose.tools import raises
from StringIO import StringIO

import menpo.io as mio
from menpo.landmark import labeller, ibug_68_trimesh
from menpo.transform import PiecewiseAffine, ThinPlateSplines
from menpo.fitmultilevel.aam import AAMBuilder, PatchBasedAAMBuilder
from menpo.fitmultilevel.featurefunctions import sparse_hog

# load images
filenames = ['breakingbad.jpg', 'takeo.ppm', 'lenna.png', 'einstein.jpg']
training_images = []
for i in range(4):
    im = mio.import_builtin_asset(filenames[i])
    im.crop_to_landmarks_proportion_inplace(0.1)
    labeller(im, 'PTS', ibug_68_trimesh)
    if im.n_channels == 3:
        im = im.as_greyscale(mode='luminosity')
    training_images.append(im)

# build aams
aam1 = AAMBuilder(feature_type=['igo', sparse_hog, None],
                  transform=PiecewiseAffine,
                  trilist=training_images[0].landmarks['ibug_68_trimesh'].
                  lms.trilist,
                  normalization_diagonal=150,
                  n_levels=3,
                  downscale=2,
                  scaled_shape_models=False,
                  pyramid_on_features=False,
                  max_shape_components=[1, 2, 3],
Example #23
 def load_image(i):
     i = i.crop_to_landmarks_proportion(0.5)
     if i.n_channels == 3:
         i = i.as_greyscale()
     labeller(i, 'PTS', face_ibug_68_to_face_ibug_68)
     return i