def test_clm():
    assert (clm.n_training_images == 4)
    assert (clm.n_levels == 2)
    assert (clm.downscale == 1.1)
    #assert (clm.features[0] == sparse_hog and len(clm.features) == 1)
    assert_allclose(np.around(clm.reference_shape.range()), (72., 69.))
    assert clm.scaled_shape_models
    assert clm.pyramid_on_features
    assert_allclose(clm.patch_shape, (8, 8))
    assert_allclose([clm.shape_models[j].n_components
                     for j in range(clm.n_levels)], (2, 2))
    assert_allclose(clm.n_classifiers_per_level, [68, 68])

    ran_0 = np.random.randint(0, clm.n_classifiers_per_level[0])
    ran_1 = np.random.randint(0, clm.n_classifiers_per_level[1])

    assert (name_of_callable(clm.classifiers[0][ran_0]) == 'linear_svm_lr')
    assert (name_of_callable(clm.classifiers[1][ran_1]) == 'linear_svm_lr')
def test_clm_2():
    assert (clm2.n_training_images == 4)
    assert (clm2.n_levels == 2)
    assert (clm2.downscale == 1.2)
    #assert (clm2.features[0] is no_op and clm2.features[1] is no_op)
    assert_allclose(np.around(clm2.reference_shape.range()), (169., 161.))
    assert clm2.scaled_shape_models
    assert (not clm2.pyramid_on_features)
    assert_allclose(clm2.patch_shape, (3, 10))
    assert (np.all([clm2.shape_models[j].n_components == 3
                    for j in range(clm2.n_levels)]))
    assert_allclose(clm2.n_classifiers_per_level, [68, 68])

    ran_0 = np.random.randint(0, clm2.n_classifiers_per_level[0])
    ran_1 = np.random.randint(0, clm2.n_classifiers_per_level[1])

    assert (name_of_callable(clm2.classifiers[0][ran_0]) ==
            'random_forest_predict')
    assert (name_of_callable(clm2.classifiers[1][ran_1]) == 'linear_svm_lr')
def __str__(self):
    if self.fitter.pyramid_on_features:
        feat_str = name_of_callable(self.fitter.features)
    else:
        feat_str = []
        for j in range(self.n_levels):
            if isinstance(self.fitter.features[j], str):
                feat_str.append(self.fitter.features[j])
            elif self.fitter.features[j] is None:
                feat_str.append("none")
            else:
                feat_str.append(name_of_callable(self.fitter.features[j]))
    out = "Fitting Result\n" \
          " - Initial error: {0:.4f}\n" \
          " - Final error: {1:.4f}\n" \
          " - {2} method with {3} pyramid levels, {4} iterations " \
          "and using {5} features.".format(
              self.initial_error(), self.final_error(),
              self.fitter.algorithm, self.n_levels, self.n_iters, feat_str)
    return out
def __str__(self):
    if self.diagonal is not None:
        diagonal = self.diagonal
    else:
        y, x = self.reference_shape.range()
        diagonal = np.sqrt(x ** 2 + y ** 2)
    is_custom_perturb_func = (self._perturb_from_gt_bounding_box !=
                              noisy_shape_from_bounding_box)
    regressor_cls = self.algorithms[0]._regressor_cls

    # Compute scale info strings
    scales_info = []
    lvl_str_tmplt = r""" - Scale {}
   - {} iterations
   - Patch shape: {}
   - Holistic feature: {}
   - Patch feature: {}"""
    for k, s in enumerate(self.scales):
        scales_info.append(lvl_str_tmplt.format(
            s, self.n_iterations[k], self.patch_shape[k],
            name_of_callable(self.holistic_features[k]),
            name_of_callable(self.patch_features[k])))
    scales_info = '\n'.join(scales_info)

    cls_str = r"""Supervised Descent Method
 - Regression performed using the {reg_alg} algorithm
 - Regression class: {reg_cls}
 - Perturbations generated per shape: {n_perturbations}
 - Images scaled to diagonal: {diagonal:.2f}
 - Custom perturbation scheme used: {is_custom_perturb_func}
 - Scales: {scales}
{scales_info}
""".format(
        reg_alg=name_of_callable(self._sd_algorithm_cls),
        reg_cls=name_of_callable(regressor_cls),
        n_perturbations=self.n_perturbations,
        diagonal=diagonal,
        is_custom_perturb_func=is_custom_perturb_func,
        scales=self.scales,
        scales_info=scales_info)
    return cls_str
def test_clm_1():
    assert (clm1.n_training_images == 4)
    assert (clm1.n_levels == 3)
    assert (clm1.downscale == 2)
    #assert (clm1.features[0] == igo and clm1.features[2] is no_op)
    assert_allclose(np.around(clm1.reference_shape.range()), (109., 103.))
    assert (not clm1.scaled_shape_models)
    assert (not clm1.pyramid_on_features)
    assert_allclose(clm1.patch_shape, (5, 5))
    assert_allclose([clm1.shape_models[j].n_components
                     for j in range(clm1.n_levels)], (1, 2, 3))
    assert_allclose(clm1.n_classifiers_per_level, [68, 68, 68])

    ran_0 = np.random.randint(0, clm1.n_classifiers_per_level[0])
    ran_1 = np.random.randint(0, clm1.n_classifiers_per_level[1])
    ran_2 = np.random.randint(0, clm1.n_classifiers_per_level[2])

    assert (name_of_callable(clm1.classifiers[0][ran_0]) == 'linear_svm_lr')
    assert (name_of_callable(clm1.classifiers[1][ran_1]) == 'linear_svm_lr')
    assert (name_of_callable(clm1.classifiers[2][ran_2]) == 'linear_svm_lr')
def _atm_str(atm):
    if atm.diagonal is not None:
        diagonal = atm.diagonal
    else:
        y, x = atm.reference_shape.range()
        diagonal = np.sqrt(x ** 2 + y ** 2)

    # Compute scale info strings
    scales_info = []
    lvl_str_tmplt = r""" - Scale {}
   - Holistic feature: {}
   - Template shape: {}
   - {} shape components"""
    for k, s in enumerate(atm.scales):
        scales_info.append(lvl_str_tmplt.format(
            s, name_of_callable(atm.holistic_features[k]),
            atm.warped_templates[k].shape,
            atm.shape_models[k].n_components))
    # Patch based ATM
    if hasattr(atm, 'patch_shape'):
        for k in range(len(scales_info)):
            scales_info[k] += '\n - Patch shape: {}'.format(
                atm.patch_shape[k])
    scales_info = '\n'.join(scales_info)

    cls_str = r"""{class_title}
 - Images warped with {transform} transform
 - Images scaled to diagonal: {diagonal:.2f}
 - Scales: {scales}
{scales_info}
""".format(class_title=atm._str_title,
           transform=name_of_callable(atm.transform),
           diagonal=diagonal,
           scales=atm.scales,
           scales_info=scales_info)
    return cls_str
def test_clm_3():
    assert (clm3.n_training_images == 4)
    assert (clm3.n_levels == 1)
    assert (clm3.downscale == 3)
    #assert (clm3.features[0] == igo and len(clm3.features) == 1)
    assert_allclose(np.around(clm3.reference_shape.range()), (169., 161.))
    assert clm3.scaled_shape_models
    assert clm3.pyramid_on_features
    assert_allclose(clm3.patch_shape, (2, 3))
    assert (np.all([clm3.shape_models[j].n_components == 1
                    for j in range(clm3.n_levels)]))
    assert_allclose(clm3.n_classifiers_per_level, [68])

    ran_0 = np.random.randint(0, clm3.n_classifiers_per_level[0])

    assert (name_of_callable(clm3.classifiers[0][ran_0]) == 'linear_svm_lr')
def test_name_of_callable_partial():
    assert name_of_callable(sparse_hog) == 'sparse_hog'
def test_name_of_callable_object_with_call():
    assert name_of_callable(Foo()) == 'Foo'
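# A minimal sketch (not the menpofit implementation) of the behaviour that the
# name_of_callable tests above rely on: plain functions report their __name__,
# functools.partial objects report the wrapped function's name, and callable
# object instances fall back to their class name. ``_name_of_callable_sketch``
# is a hypothetical helper written only for illustration.
import functools


def _name_of_callable_sketch(c):
    if isinstance(c, functools.partial):
        # partials have no __name__; report the wrapped function's name
        return c.func.__name__
    try:
        # plain functions (e.g. igo) expose __name__ directly
        return c.__name__
    except AttributeError:
        # callable object instances (e.g. Foo()) use their class name
        return c.__class__.__name__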
def __str__(self):
    from menpofit.base import name_of_callable
    out = "{}\n - {} training images.\n".format(self._str_title,
                                                self.n_training_images)
    # small strings about number of channels, channels string and downscale
    down_str = []
    for j in range(self.n_levels):
        if j == self.n_levels - 1:
            down_str.append('(no downscale)')
        else:
            down_str.append('(downscale by {})'.format(
                self.downscale**(self.n_levels - j - 1)))
    temp_img = Image(image_data=np.random.rand(50, 50))
    if self.pyramid_on_features:
        temp = self.features(temp_img)
        n_channels = [temp.n_channels] * self.n_levels
    else:
        n_channels = []
        for j in range(self.n_levels):
            temp = self.features[j](temp_img)
            n_channels.append(temp.n_channels)
    # string about features and channels
    if self.pyramid_on_features:
        feat_str = "- Feature is {} with ".format(
            name_of_callable(self.features))
        if n_channels[0] == 1:
            ch_str = ["channel"]
        else:
            ch_str = ["channels"]
    else:
        feat_str = []
        ch_str = []
        for j in range(self.n_levels):
            feat_str.append("- Feature is {} with ".format(
                name_of_callable(self.features[j])))
            if n_channels[j] == 1:
                ch_str.append("channel")
            else:
                ch_str.append("channels")
    if self.n_levels > 1:
        if self.scaled_shape_models:
            out = "{} - Gaussian pyramid with {} levels and downscale " \
                  "factor of {}.\n - Each level has a scaled shape " \
                  "model (reference frame).\n - Patch size is {}W x " \
                  "{}H.\n".format(out, self.n_levels, self.downscale,
                                  self.patch_shape[1], self.patch_shape[0])
        else:
            out = "{} - Gaussian pyramid with {} levels and downscale " \
                  "factor of {}:\n - Shape models (reference frames) " \
                  "are not scaled.\n - Patch size is {}W x " \
                  "{}H.\n".format(out, self.n_levels, self.downscale,
                                  self.patch_shape[1], self.patch_shape[0])
        if self.pyramid_on_features:
            out = "{} - Pyramid was applied on feature space.\n " \
                  "{}{} {} per image.\n".format(out, feat_str,
                                                n_channels[0], ch_str[0])
        else:
            out = "{} - Features were extracted at each pyramid " \
                  "level.\n".format(out)
        for i in range(self.n_levels - 1, -1, -1):
            out = "{} - Level {} {}: \n".format(out, self.n_levels - i,
                                                down_str[i])
            if not self.pyramid_on_features:
                out = "{} {}{} {} per image.\n".format(
                    out, feat_str[i], n_channels[i], ch_str[i])
            out = "{0} - {1} shape components ({2:.2f}% of " \
                  "variance)\n - {3} {4} classifiers.\n".format(
                      out, self.shape_models[i].n_components,
                      self.shape_models[i].variance_ratio() * 100,
                      self.n_classifiers_per_level[i],
                      name_of_callable(self.classifiers[i][0]))
    else:
        if self.pyramid_on_features:
            feat_str = [feat_str]
        out = "{0} - No pyramid used:\n {1}{2} {3} per image.\n" \
              " - {4} shape components ({5:.2f}% of " \
              "variance)\n - {6} {7} classifiers.".format(
                  out, feat_str[0], n_channels[0], ch_str[0],
                  self.shape_models[0].n_components,
                  self.shape_models[0].variance_ratio() * 100,
                  self.n_classifiers_per_level[0],
                  name_of_callable(self.classifiers[0][0]))
    return out
def __str__(self): out = "Supervised Descent Method\n" \ " - Non-Parametric '{}' Regressor\n" \ " - {} training images.\n".format( name_of_callable(self._fitters[0].regressor), self._n_training_images) # small strings about number of channels, channels string and downscale down_str = [] for j in range(self.n_levels): if j == self.n_levels - 1: down_str.append('(no downscale)') else: down_str.append('(downscale by {})'.format( self.downscale**(self.n_levels - j - 1))) temp_img = Image(image_data=np.random.rand(40, 40)) if self.pyramid_on_features: temp = self.features(temp_img) n_channels = [temp.n_channels] * self.n_levels else: n_channels = [] for j in range(self.n_levels): temp = self.features[j](temp_img) n_channels.append(temp.n_channels) # string about features and channels if self.pyramid_on_features: feat_str = "- Feature is {} with ".format( name_of_callable(self.features)) if n_channels[0] == 1: ch_str = ["channel"] else: ch_str = ["channels"] else: feat_str = [] ch_str = [] for j in range(self.n_levels): if isinstance(self.features[j], str): feat_str.append("- Feature is {} with ".format( self.features[j])) elif self.features[j] is None: feat_str.append("- No features extracted. ") else: feat_str.append("- Feature is {} with ".format( self.features[j].__name__)) if n_channels[j] == 1: ch_str.append("channel") else: ch_str.append("channels") if self.n_levels > 1: out = "{} - Gaussian pyramid with {} levels and downscale " \ "factor of {}.\n".format(out, self.n_levels, self.downscale) if self.pyramid_on_features: out = "{} - Pyramid was applied on feature space.\n " \ "{}{} {} per image.\n".format(out, feat_str, n_channels[0], ch_str[0]) else: out = "{} - Features were extracted at each pyramid " \ "level.\n".format(out) for i in range(self.n_levels - 1, -1, -1): out = "{} - Level {} {}: \n {}{} {} per " \ "image.\n".format( out, self.n_levels - i, down_str[i], feat_str[i], n_channels[i], ch_str[i]) else: if self.pyramid_on_features: feat_str = [feat_str] out = "{0} - No pyramid used:\n {1}{2} {3} per image.\n".format( out, feat_str[0], n_channels[0], ch_str[0]) return out
def __str__(self): out = "{0} Fitter\n" \ " - Lucas-Kanade {1}\n" \ " - Transform is {2} and residual is {3}.\n" \ " - {4} training images.\n".format( self.aam._str_title, self._fitters[0].algorithm, self._fitters[0].transform.__class__.__name__, self._fitters[0].residual.type, self.aam.n_training_images) # small strings about number of channels, channels string and downscale n_channels = [] down_str = [] for j in range(self.n_levels): n_channels.append( self._fitters[j].appearance_model.template_instance.n_channels) if j == self.n_levels - 1: down_str.append('(no downscale)') else: down_str.append('(downscale by {})'.format( self.downscale**(self.n_levels - j - 1))) # string about features and channels if self.pyramid_on_features: feat_str = "- Feature is {} with ".format( name_of_callable(self.features)) if n_channels[0] == 1: ch_str = ["channel"] else: ch_str = ["channels"] else: feat_str = [] ch_str = [] for j in range(self.n_levels): if isinstance(self.features[j], str): feat_str.append("- Feature is {} with ".format( self.features[j])) elif self.features[j] is None: feat_str.append("- No features extracted. ") else: feat_str.append("- Feature is {} with ".format( self.features[j].__name__)) if n_channels[j] == 1: ch_str.append("channel") else: ch_str.append("channels") if self.n_levels > 1: if self.aam.scaled_shape_models: out = "{} - Gaussian pyramid with {} levels and downscale " \ "factor of {}.\n - Each level has a scaled shape " \ "model (reference frame).\n".format(out, self.n_levels, self.downscale) else: out = "{} - Gaussian pyramid with {} levels and downscale " \ "factor of {}:\n - Shape models (reference frames) " \ "are not scaled.\n".format(out, self.n_levels, self.downscale) if self.pyramid_on_features: out = "{} - Pyramid was applied on feature space.\n " \ "{}{} {} per image.\n".format(out, feat_str, n_channels[0], ch_str[0]) if not self.aam.scaled_shape_models: out = "{} - Reference frames of length {} " \ "({} x {}C, {} x {}C)\n".format( out, self._fitters[0].appearance_model.n_features, self._fitters[0].template.n_true_pixels(), n_channels[0], self._fitters[0].template._str_shape, n_channels[0]) else: out = "{} - Features were extracted at each pyramid " \ "level.\n".format(out) for i in range(self.n_levels - 1, -1, -1): out = "{} - Level {} {}: \n".format(out, self.n_levels - i, down_str[i]) if not self.pyramid_on_features: out = "{} {}{} {} per image.\n".format( out, feat_str[i], n_channels[i], ch_str[i]) if (self.aam.scaled_shape_models or (not self.pyramid_on_features)): out = "{} - Reference frame of length {} " \ "({} x {}C, {} x {}C)\n".format( out, self._fitters[i].appearance_model.n_features, self._fitters[i].template.n_true_pixels(), n_channels[i], self._fitters[i].template._str_shape, n_channels[i]) out = "{0} - {1} motion components\n - {2} active " \ "appearance components ({3:.2f}% of original " \ "variance)\n".format( out, self._fitters[i].transform.n_parameters, self._fitters[i].appearance_model.n_active_components, self._fitters[i].appearance_model.variance_ratio() * 100) else: if self.pyramid_on_features: feat_str = [feat_str] out = "{0} - No pyramid used:\n {1}{2} {3} per image.\n" \ " - Reference frame of length {4} ({5} x {6}C, " \ "{7} x {8}C)\n - {9} motion parameters\n" \ " - {10} appearance components ({11:.2f}% of original " \ "variance)\n".format( out, feat_str[0], n_channels[0], ch_str[0], self._fitters[0].appearance_model.n_features, self._fitters[0].template.n_true_pixels(), n_channels[0], self._fitters[0].template._str_shape, 
n_channels[0], self._fitters[0].transform.n_parameters, self._fitters[0].appearance_model.n_active_components, self._fitters[0].appearance_model.variance_ratio() * 100) return out
def __str__(self): out = "{0} Fitter\n" \ " - Lucas-Kanade {1}\n" \ " - Transform is {2} and residual is {3}.\n" \ " - {4} training images.\n".format( self.atm._str_title, self._fitters[0].algorithm, self._fitters[0].transform.__class__.__name__, self._fitters[0].residual.type, self.atm.n_training_shapes) # small strings about number of channels, channels string and downscale n_channels = [] down_str = [] for j in range(self.n_levels): n_channels.append( self._fitters[j].template.n_channels) if j == self.n_levels - 1: down_str.append('(no downscale)') else: down_str.append('(downscale by {})'.format( self.downscale**(self.n_levels - j - 1))) # string about features and channels if self.pyramid_on_features: feat_str = "- Feature is {} with ".format(name_of_callable( self.features)) if n_channels[0] == 1: ch_str = ["channel"] else: ch_str = ["channels"] else: feat_str = [] ch_str = [] for j in range(self.n_levels): if isinstance(self.features[j], str): feat_str.append("- Feature is {} with ".format( self.features[j])) elif self.features[j] is None: feat_str.append("- No features extracted. ") else: feat_str.append("- Feature is {} with ".format( self.features[j].__name__)) if n_channels[j] == 1: ch_str.append("channel") else: ch_str.append("channels") if self.n_levels > 1: if self.atm.scaled_shape_models: out = "{} - Gaussian pyramid with {} levels and downscale " \ "factor of {}.\n - Each level has a scaled shape " \ "model (reference frame).\n".format(out, self.n_levels, self.downscale) else: out = "{} - Gaussian pyramid with {} levels and downscale " \ "factor of {}:\n - Shape models (reference frames) " \ "are not scaled.\n".format(out, self.n_levels, self.downscale) if self.pyramid_on_features: out = "{} - Pyramid was applied on feature space.\n " \ "{}{} {} per image.\n".format(out, feat_str, n_channels[0], ch_str[0]) if not self.atm.scaled_shape_models: out = "{} - Reference frames of length {} " \ "({} x {}C, {} x {}C)\n".format( out, self._fitters[0].template.n_true_pixels() * n_channels[0], self._fitters[0].template.n_true_pixels(), n_channels[0], self._fitters[0].template._str_shape, n_channels[0]) else: out = "{} - Features were extracted at each pyramid " \ "level.\n".format(out) for i in range(self.n_levels - 1, -1, -1): out = "{} - Level {} {}: \n".format(out, self.n_levels - i, down_str[i]) if not self.pyramid_on_features: out = "{} {}{} {} per image.\n".format( out, feat_str[i], n_channels[i], ch_str[i]) if (self.atm.scaled_shape_models or (not self.pyramid_on_features)): out = "{} - Reference frame of length {} " \ "({} x {}C, {} x {}C)\n".format( out, self._fitters[i].template.n_true_pixels() * n_channels[i], self._fitters[i].template.n_true_pixels(), n_channels[i], self._fitters[i].template._str_shape, n_channels[i]) out = "{0} - {1} motion components\n\n".format( out, self._fitters[i].transform.n_parameters) else: if self.pyramid_on_features: feat_str = [feat_str] out = "{0} - No pyramid used:\n {1}{2} {3} per image.\n" \ " - Reference frame of length {4} ({5} x {6}C, " \ "{7} x {8}C)\n - {9} motion parameters\n".format( out, feat_str[0], n_channels[0], ch_str[0], self._fitters[0].template.n_true_pixels() * n_channels[0], self._fitters[0].template.n_true_pixels(), n_channels[0], self._fitters[0].template._str_shape, n_channels[0], self._fitters[0].transform.n_parameters) return out
def __str__(self): return "{}Supervised Descent Method for CLMs:\n" \ " - Parametric '{}' Regressor\n" \ " - {} training images.\n".format( self.clm.__str__(), name_of_callable(self._fitters[0].regressor), self._n_training_images)
def __str__(self): out = "{}\n - {} training images.\n".format(self._str_title, self.n_training_images) # small strings about number of channels, channels string and downscale n_channels = [] down_str = [] for j in range(self.n_levels): n_channels.append( self.appearance_models[j].template_instance.n_channels) if j == self.n_levels - 1: down_str.append('(no downscale)') else: down_str.append('(downscale by {})'.format( self.downscale**(self.n_levels - j - 1))) # string about features and channels if self.pyramid_on_features: feat_str = "- Feature is {} with ".format( name_of_callable(self.features)) if n_channels[0] == 1: ch_str = ["channel"] else: ch_str = ["channels"] else: feat_str = [] ch_str = [] for j in range(self.n_levels): feat_str.append("- Feature is {} with ".format( name_of_callable(self.features[j]))) if n_channels[j] == 1: ch_str.append("channel") else: ch_str.append("channels") out = "{} - {} Warp.\n".format(out, name_of_callable(self.transform)) if self.n_levels > 1: if self.scaled_shape_models: out = "{} - Gaussian pyramid with {} levels and downscale " \ "factor of {}.\n - Each level has a scaled shape " \ "model (reference frame).\n".format(out, self.n_levels, self.downscale) else: out = "{} - Gaussian pyramid with {} levels and downscale " \ "factor of {}:\n - Shape models (reference frames) " \ "are not scaled.\n".format(out, self.n_levels, self.downscale) if self.pyramid_on_features: out = "{} - Pyramid was applied on feature space.\n " \ "{}{} {} per image.\n".format(out, feat_str, n_channels[0], ch_str[0]) if not self.scaled_shape_models: out = "{} - Reference frames of length {} " \ "({} x {}C, {} x {}C)\n".format( out, self.appearance_models[0].n_features, self.appearance_models[0].template_instance.n_true_pixels(), n_channels[0], self.appearance_models[0].template_instance._str_shape, n_channels[0]) else: out = "{} - Features were extracted at each pyramid " \ "level.\n".format(out) for i in range(self.n_levels - 1, -1, -1): out = "{} - Level {} {}: \n".format(out, self.n_levels - i, down_str[i]) if not self.pyramid_on_features: out = "{} {}{} {} per image.\n".format( out, feat_str[i], n_channels[i], ch_str[i]) if (self.scaled_shape_models or (not self.pyramid_on_features)): out = "{} - Reference frame of length {} " \ "({} x {}C, {} x {}C)\n".format( out, self.appearance_models[i].n_features, self.appearance_models[i].template_instance.n_true_pixels(), n_channels[i], self.appearance_models[i].template_instance._str_shape, n_channels[i]) out = "{0} - {1} shape components ({2:.2f}% of " \ "variance)\n - {3} appearance components " \ "({4:.2f}% of variance)\n".format( out, self.shape_models[i].n_components, self.shape_models[i].variance_ratio() * 100, self.appearance_models[i].n_components, self.appearance_models[i].variance_ratio() * 100) else: if self.pyramid_on_features: feat_str = [feat_str] out = "{0} - No pyramid used:\n {1}{2} {3} per image.\n" \ " - Reference frame of length {4} ({5} x {6}C, " \ "{7} x {8}C)\n - {9} shape components ({10:.2f}% of " \ "variance)\n - {11} appearance components ({12:.2f}% of " \ "variance)\n".format( out, feat_str[0], n_channels[0], ch_str[0], self.appearance_models[0].n_features, self.appearance_models[0].template_instance.n_true_pixels(), n_channels[0], self.appearance_models[0].template_instance._str_shape, n_channels[0], self.shape_models[0].n_components, self.shape_models[0].variance_ratio() * 100, self.appearance_models[0].n_components, self.appearance_models[0].variance_ratio() * 100) return out
def __str__(self): from menpofit.base import name_of_callable out = "{0} Fitter\n" \ " - Gradient-Descent {1}\n" \ " - Transform is {2}.\n" \ " - {3} training images.\n".format( self.clm._str_title, self._fitters[0].algorithm, self._fitters[0].transform.__class__.__name__, self.clm.n_training_images) # small strings about number of channels, channels string and downscale down_str = [] for j in range(self.n_levels): if j == self.n_levels - 1: down_str.append('(no downscale)') else: down_str.append('(downscale by {})'.format( self.downscale**(self.n_levels - j - 1))) temp_img = Image(image_data=np.random.rand(50, 50)) if self.pyramid_on_features: temp = self.features(temp_img) n_channels = [temp.n_channels] * self.n_levels else: n_channels = [] for j in range(self.n_levels): temp = self.features[j](temp_img) n_channels.append(temp.n_channels) # string about features and channels if self.pyramid_on_features: feat_str = "- Feature is {} with ".format( name_of_callable(self.features)) if n_channels[0] == 1: ch_str = ["channel"] else: ch_str = ["channels"] else: feat_str = [] ch_str = [] for j in range(self.n_levels): if isinstance(self.features[j], str): feat_str.append("- Feature is {} with ".format( self.features[j])) elif self.features[j] is None: feat_str.append("- No features extracted. ") else: feat_str.append("- Feature is {} with ".format( name_of_callable(self.features[j]))) if n_channels[j] == 1: ch_str.append("channel") else: ch_str.append("channels") if self.n_levels > 1: if self.clm.scaled_shape_models: out = "{} - Gaussian pyramid with {} levels and downscale " \ "factor of {}.\n - Each level has a scaled shape " \ "model (reference frame).\n - Patch size is {}W x " \ "{}H.\n".format(out, self.n_levels, self.downscale, self.clm.patch_shape[1], self.clm.patch_shape[0]) else: out = "{} - Gaussian pyramid with {} levels and downscale " \ "factor of {}:\n - Shape models (reference frames) " \ "are not scaled.\n - Patch size is {}W x " \ "{}H.\n".format(out, self.n_levels, self.downscale, self.clm.patch_shape[1], self.clm.patch_shape[0]) if self.pyramid_on_features: out = "{} - Pyramid was applied on feature space.\n " \ "{}{} {} per image.\n".format(out, feat_str, n_channels[0], ch_str[0]) else: out = "{} - Features were extracted at each pyramid " \ "level.\n".format(out) for i in range(self.n_levels - 1, -1, -1): out = "{} - Level {} {}: \n".format(out, self.n_levels - i, down_str[i]) if not self.pyramid_on_features: out = "{} {}{} {} per image.\n".format( out, feat_str[i], n_channels[i], ch_str[i]) out = "{0} - {1} motion components\n - {2} {3} " \ "classifiers.\n".format( out, self._fitters[i].transform.n_parameters, len(self._fitters[i].classifiers), name_of_callable(self._fitters[i].classifiers[0])) else: if self.pyramid_on_features: feat_str = [feat_str] out = "{0} - No pyramid used:\n {1}{2} {3} per image.\n" \ " - {4} motion components\n - {5} {6} " \ "classifiers.".format( out, feat_str[0], n_channels[0], ch_str[0], out, self._fitters[0].transform.n_parameters, len(self._fitters[0].classifiers), name_of_callable(self._fitters[0].classifiers[0])) return out
def test_name_of_callable_function():
    assert name_of_callable(igo) == 'igo'