def clm_build_benchmark(training_images, training_options=None, verbose=False):
    r"""
    Builds a CLM model.

    Parameters
    ----------
    training_images: list of :class:`MaskedImage` objects
        A list of the training images.
    training_options: dictionary, optional
        A dictionary with the parameters that will be passed to the
        CLMBuilder (:class:`menpo.fitmultilevel.clm.CLMBuilder`).
        If None, the default options will be used.
        This is an example of the dictionary with the default options:

            training_options = {'group': 'PTS',
                                'classifier_type': linear_svm_lr,
                                'patch_shape': (5, 5),
                                'features': sparse_hog,
                                'normalization_diagonal': None,
                                'n_levels': 3,
                                'downscale': 1.1,
                                'scaled_shape_models': True,
                                'max_shape_components': None,
                                'boundary': 3
                                }

        For an explanation of the options, please refer to the CLMBuilder
        documentation.

        Default: None
    verbose: boolean, optional
        If True, it prints information regarding the CLM training.

        Default: False

    Returns
    -------
    clm: :class:`menpo.fitmultilevel.clm.CLM` object
        The trained CLM model.
    """
    if verbose:
        print('CLM Training:')

    # parse options
    if training_options is None:
        training_options = {}

    # group option
    group = training_options.pop('group', None)

    # build clm
    clm = CLMBuilder(**training_options).build(training_images, group=group,
                                               verbose=verbose)
    return clm
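# A minimal usage sketch of clm_build_benchmark (not part of the original
# module): the option names follow the docstring above, and the
# '_example_training_images' argument is a hypothetical list of landmarked
# menpo images, e.g. assembled like the ones in the tests further down.
def _example_clm_build_benchmark_usage(_example_training_images):
    custom_options = {'group': 'PTS',
                      'patch_shape': (8, 8),
                      'n_levels': 2,
                      'downscale': 1.2}
    return clm_build_benchmark(_example_training_images,
                               training_options=custom_options,
                               verbose=True)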
# load images
filenames = ['breakingbad.jpg', 'takeo.ppm', 'lenna.png', 'einstein.jpg']
training_images = []
for i in range(4):
    im = mio.import_builtin_asset(filenames[i])
    im.crop_to_landmarks_proportion_inplace(0.1)
    if im.n_channels == 3:
        im = im.as_greyscale(mode='luminosity')
    training_images.append(im)

# build clm
clm = CLMBuilder(classifier_trainers=linear_svm_lr,
                 patch_shape=(8, 8),
                 features=sparse_hog,
                 normalization_diagonal=100,
                 n_levels=2,
                 downscale=1.1,
                 scaled_shape_models=True,
                 max_shape_components=[2, 2],
                 boundary=3).build(training_images)


def test_clm():
    assert (clm.n_training_images == 4)
    assert (clm.n_levels == 2)
    assert (clm.downscale == 1.1)
    #assert (clm.features[0] == sparse_hog and len(clm.features) == 1)
    assert_allclose(np.around(clm.reference_shape.range()), (72., 69.))
    assert clm.scaled_shape_models
    assert clm.pyramid_on_features
    assert_allclose(clm.patch_shape, (8, 8))
# The *_exception tests below are expected to raise; the @raises(ValueError)
# decorator (nose.tools, imported at the top of the file) is assumed here
# from the *_exception naming convention.
@raises(ValueError)
def test_downscale_exception():
    clm = CLMBuilder(downscale=1).build(training_images)
    assert (clm.downscale == 1)
    CLMBuilder(downscale=0).build(training_images)


@raises(ValueError)
def test_features_exception():
    CLMBuilder(features=[igo, sparse_hog]).build(training_images)


@raises(ValueError)
def test_n_levels_exception():
    clm = CLMBuilder(n_levels=0).build(training_images)


@raises(ValueError)
def test_classifier_type_2_exception():
    CLMBuilder(classifier_trainers=['linear_svm_lr']).build(training_images)


@raises(ValueError)
def test_patch_shape_2_exception():
    CLMBuilder(patch_shape=(5, 6, 7)).build(training_images)
# The decorator supplying mock_stdout is assumed here (mock/unittest.mock),
# based on the mock_stdout argument name.
@patch('sys.stdout', new_callable=StringIO)
def test_verbose_mock(mock_stdout):
    CLMBuilder().build(training_images, verbose=True)
@raises(ValueError)
def test_boundary_exception():
    CLMBuilder(boundary=-1).build(training_images)


@raises(ValueError)
def test_max_shape_components_2_exception():
    CLMBuilder(max_shape_components=[1, 2]).build(training_images)


@raises(ValueError)
def test_normalization_diagonal_exception():
    CLMBuilder(normalization_diagonal=10).build(training_images)
# load images
filenames = ['breakingbad.jpg', 'takeo.ppm', 'lenna.png', 'einstein.jpg']
training_images = []
for i in range(4):
    im = mio.import_builtin_asset(filenames[i])
    im.crop_to_landmarks_proportion_inplace(0.1)
    if im.n_channels == 3:
        im = im.as_greyscale(mode='luminosity')
    training_images.append(im)

# build clms
clm1 = CLMBuilder(classifier_trainers=[linear_svm_lr],
                  patch_shape=(5, 5),
                  features=[igo, sparse_hog, no_op],
                  normalization_diagonal=150,
                  n_levels=3,
                  downscale=2,
                  scaled_shape_models=False,
                  max_shape_components=[1, 2, 3],
                  boundary=3).build(training_images)

clm2 = CLMBuilder(classifier_trainers=[random_forest, linear_svm_lr],
                  patch_shape=(3, 10),
                  features=[no_op, no_op],
                  normalization_diagonal=None,
                  n_levels=2,
                  downscale=1.2,
                  scaled_shape_models=True,
                  max_shape_components=None,
                  boundary=0).build(training_images)
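# Sketch (not part of the original tests): example assertions that could be
# written against clm1 and clm2 above, following the pattern of test_clm.
# Only properties already exercised by test_clm (n_training_images, n_levels,
# scaled_shape_models, patch_shape) are used here.
def test_clm1_clm2_basic_properties():
    assert (clm1.n_training_images == 4)
    assert (clm1.n_levels == 3)
    assert not clm1.scaled_shape_models
    assert (clm2.n_training_images == 4)
    assert (clm2.n_levels == 2)
    assert clm2.scaled_shape_models
    assert_allclose(clm2.patch_shape, (3, 10))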