Example #1
    def __str__(self):
        is_custom_perturb_func = (self._perturb_from_gt_bounding_box !=
                                  noisy_shape_from_bounding_box)
        if is_custom_perturb_func:
            is_custom_perturb_func = name_of_callable(
                self._perturb_from_gt_bounding_box)
        regressor_cls = self.algorithms[0]._regressor_cls

        # Compute scale info strings
        scales_info = []
        lvl_str_tmplt = r"""   - Scale {}
     - {} iterations"""
        for k, s in enumerate(self.scales):
            scales_info.append(lvl_str_tmplt.format(s, self.n_iterations[k]))
        scales_info = '\n'.join(scales_info)

        cls_str = r"""Supervised Descent Method
 - Regression performed using the {reg_alg} algorithm
   - Regression class: {reg_cls}
 - Perturbations generated per shape: {n_perturbations}
 - Custom perturbation scheme used: {is_custom_perturb_func}
 - Scales: {scales}
{scales_info}
""".format(reg_alg=name_of_callable(self._sd_algorithm_cls[0]),
           reg_cls=name_of_callable(regressor_cls),
           n_perturbations=self.n_perturbations,
           is_custom_perturb_func=is_custom_perturb_func,
           scales=self.scales,
           scales_info=scales_info)

        return self.aam.__str__() + cls_str
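Every example in this listing builds its display strings with `name_of_callable`. As a rough mental model only (a sketch written for this listing, not the actual menpo.base implementation), the helper behaves roughly as follows: unwrap `functools.partial` objects, prefer `__name__`, and fall back to the class name for callable instances.

from functools import partial

def name_of_callable(c):
    # Sketch for illustration only, not menpo's real code: unwrap partials,
    # use __name__ for plain functions and classes, otherwise fall back to
    # the name of the instance's class.
    if isinstance(c, partial):
        c = c.func
    try:
        return c.__name__
    except AttributeError:
        return c.__class__.__name__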
Example #2
    def __str__(self):
        if self.diagonal is not None:
            diagonal = self.diagonal
        else:
            y, x = self.reference_shape.range()
            diagonal = np.sqrt(x ** 2 + y ** 2)

        # Compute scale info strings
        scales_info = []
        lvl_str_tmplt = r"""   - Scale {}
     - Holistic feature: {}
     - Template shape: {}"""
        for k, s in enumerate(self.scales):
            scales_info.append(lvl_str_tmplt.format(
                    s, name_of_callable(self.holistic_features[k]),
                    self.templates[k].shape))
        scales_info = '\n'.join(scales_info)

        cls_str = r"""Lucas-Kanade {class_title}
 - {residual}
 - Images warped with {transform} transform
 - Images scaled to diagonal: {diagonal:.2f}
 - Scales: {scales}
{scales_info}
""".format(class_title=self.algorithms[0],
           residual=self.algorithms[0].residual,
           transform=name_of_callable(self.transform_cls),
           diagonal=diagonal,
           scales=self.scales,
           scales_info=scales_info)
        return cls_str
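The same string-building pattern recurs throughout these examples: fill a per-scale template in a loop, join the pieces with newlines, then drop the joined block into a top-level template. A self-contained sketch of that pattern with made-up values:

scales = [0.5, 1.0]
template_shapes = [(36, 36), (72, 72)]

# Per-scale block, filled once per scale and then joined.
lvl_str_tmplt = """   - Scale {}
     - Template shape: {}"""
scales_info = '\n'.join(lvl_str_tmplt.format(s, template_shapes[k])
                        for k, s in enumerate(scales))

# Top-level template receives the joined per-scale blocks.
print("""Lucas-Kanade fitter
 - Scales: {scales}
{scales_info}""".format(scales=scales, scales_info=scales_info))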
Example #3
    def __str__(self):
        is_custom_perturb_func = (self._perturb_from_gt_bounding_box !=
                                  noisy_shape_from_bounding_box)
        if is_custom_perturb_func:
            is_custom_perturb_func = name_of_callable(
                    self._perturb_from_gt_bounding_box)
        regressor_cls = self.algorithms[0]._regressor_cls

        # Compute scale info strings
        scales_info = []
        lvl_str_tmplt = r"""   - Scale {}
     - {} iterations"""
        for k, s in enumerate(self.scales):
            scales_info.append(lvl_str_tmplt.format(s, self.n_iterations[k]))
        scales_info = '\n'.join(scales_info)

        cls_str = r"""Supervised Descent Method
 - Regression performed using the {reg_alg} algorithm
   - Regression class: {reg_cls}
 - Perturbations generated per shape: {n_perturbations}
 - Custom perturbation scheme used: {is_custom_perturb_func}
 - Scales: {scales}
{scales_info}
""".format(
                reg_alg=name_of_callable(self._sd_algorithm_cls[0]),
                reg_cls=name_of_callable(regressor_cls),
                n_perturbations=self.n_perturbations,
                is_custom_perturb_func=is_custom_perturb_func,
                scales=self.scales,
                scales_info=scales_info)

        return self.aam.__str__() + cls_str
Example #4
    def __str__(self):
        if self.diagonal is not None:
            diagonal = self.diagonal
        else:
            y, x = self.reference_shape.range()
            diagonal = np.sqrt(x**2 + y**2)

        # Compute scale info strings
        scales_info = []
        lvl_str_tmplt = r"""   - Scale {}
     - Holistic feature: {}
     - Template shape: {}"""
        for k, s in enumerate(self.scales):
            scales_info.append(
                lvl_str_tmplt.format(
                    s, name_of_callable(self.holistic_features[k]),
                    self.templates[k].shape))
        scales_info = '\n'.join(scales_info)

        cls_str = r"""Lucas-Kanade {class_title}
 - {residual}
 - Images warped with {transform} transform
 - Images scaled to diagonal: {diagonal:.2f}
 - Scales: {scales}
{scales_info}
""".format(class_title=self.algorithms[0],
           residual=self.algorithms[0].residual,
           transform=name_of_callable(self.transform_cls),
           diagonal=diagonal,
           scales=self.scales,
           scales_info=scales_info)
        return cls_str
Example #5
    def __str__(self):
        cls_str = r"""{}
 - Shape model class: {}
   - {} vertices, {} triangles
   - {} shape components
   - Instance class: {}
 - Texture model class: {}
   - {} texture components
   - Diagonal of {} pixels
   - Features function is {}
   - {} channels
 - Sparse landmarks class: {}
   - {} landmarks
""".format(
            self._str_title,
            name_of_callable(self.shape_model),
            self.n_vertices,
            self.n_triangles,
            self.shape_model.n_components,
            name_of_callable(self.shape_model.template_instance),
            name_of_callable(self.texture_model),
            self.texture_model.n_components,
            self.diagonal,
            name_of_callable(self.holistic_features),
            self.n_channels,
            name_of_callable(self.landmarks),
            self.landmarks.n_points,
        )
        return cls_str
Example #6
def _aps_str(aps):
    if aps.diagonal is not None:
        diagonal = aps.diagonal
    else:
        y, x = aps.reference_shape.range()
        diagonal = np.sqrt(x**2 + y**2)

    # Compute scale info strings
    scales_info = []
    lvl_str_tmplt = r"""   - Scale {}
     - Holistic feature: {}
     - Patch shape: {}
     - Appearance model class: {}
       - {}
       - {} features per point ({} in total)
       - {}
     - Shape model class: {}
       - {}
       - {} shape components
       - {} similarity transform parameters
     - Deformation model class: {}
       - {}"""
    for k, s in enumerate(aps.scales):
        comp_str = "No SVD used"
        if aps.appearance_models[k].n_components is not None:
            comp_str = "{} SVD components".format(
                aps.appearance_models[k].n_components)
        shape_model_str = "Trained using PCA"
        if aps.shape_graph[k] is not None:
            shape_model_str = "Trained using GMRF: {}".format(
                aps.shape_graph[k].__str__())
        scales_info.append(
            lvl_str_tmplt.format(
                s, name_of_callable(aps.holistic_features[k]),
                aps.patch_shape[k], name_of_callable(aps.appearance_models[k]),
                aps.appearance_models[k].graph.__str__(),
                aps.appearance_models[k].n_features_per_vertex,
                aps.appearance_models[k].n_features, comp_str,
                name_of_callable(aps.shape_models[k]), shape_model_str,
                aps.shape_models[k].model.n_components,
                aps.shape_models[k].n_global_parameters,
                name_of_callable(aps.deformation_models[k]),
                aps.deformation_models[k].graph.__str__()))
    scales_info = '\n'.join(scales_info)

    cls_str = r"""{class_title}
 - Images scaled to diagonal: {diagonal:.2f}
 - Scales: {scales}
{scales_info}
""".format(class_title=aps._str_title,
           diagonal=diagonal,
           scales=aps.scales,
           scales_info=scales_info)
    return cls_str
Example #7
def _aps_str(aps):
    if aps.diagonal is not None:
        diagonal = aps.diagonal
    else:
        y, x = aps.reference_shape.range()
        diagonal = np.sqrt(x ** 2 + y ** 2)

    # Compute scale info strings
    scales_info = []
    lvl_str_tmplt = r"""   - Scale {}
     - Holistic feature: {}
     - Patch shape: {}
     - Appearance model class: {}
       - {}
       - {} features per point ({} in total)
       - {}
     - Shape model class: {}
       - {}
       - {} shape components
       - {} similarity transform parameters
     - Deformation model class: {}
       - {}"""
    for k, s in enumerate(aps.scales):
        comp_str = "No SVD used"
        if aps.appearance_models[k].n_components is not None:
            comp_str = "{} SVD components".format(aps.appearance_models[k].n_components)
        shape_model_str = "Trained using PCA"
        if aps.shape_graph[k] is not None:
            shape_model_str = "Trained using GMRF: {}".format(aps.shape_graph[k].__str__())
        scales_info.append(lvl_str_tmplt.format(
            s, name_of_callable(aps.holistic_features[k]),
            aps.patch_shape[k],
            name_of_callable(aps.appearance_models[k]),
            aps.appearance_models[k].graph.__str__(),
            aps.appearance_models[k].n_features_per_vertex,
            aps.appearance_models[k].n_features,
            comp_str,
            name_of_callable(aps.shape_models[k]),
            shape_model_str,
            aps.shape_models[k].model.n_components,
            aps.shape_models[k].n_global_parameters,
            name_of_callable(aps.deformation_models[k]),
            aps.deformation_models[k].graph.__str__()))
    scales_info = '\n'.join(scales_info)

    cls_str = r"""{class_title}
 - Images scaled to diagonal: {diagonal:.2f}
 - Scales: {scales}
{scales_info}
""".format(class_title=aps._str_title,
           diagonal=diagonal,
           scales=aps.scales,
           scales_info=scales_info)
    return cls_str
Example #8
    def __str__(self):
        if self.diagonal is not None:
            diagonal = self.diagonal
        else:
            y, x = self.reference_shape.range()
            diagonal = np.sqrt(x**2 + y**2)

        # Compute scale info strings
        scales_info = []
        lvl_str_tmplt = r"""   - Scale {}
     - Holistic feature: {}
     - Ensemble of experts class: {}
       - {} experts
       - {} class
       - Patch shape: {} x {}
       - Patch normalisation: {}
       - Context shape: {} x {}
       - Cosine mask: {}
     - Shape model class: {}
       - {} shape components
       - {} similarity transform parameters"""
        for k, s in enumerate(self.scales):
            scales_info.append(
                lvl_str_tmplt.format(
                    s, name_of_callable(self.holistic_features[k]),
                    name_of_callable(self.expert_ensemble_cls[k]),
                    self.expert_ensembles[k].n_experts,
                    name_of_callable(self.expert_ensembles[k]._icf),
                    self.expert_ensembles[k].patch_shape[0],
                    self.expert_ensembles[k].patch_shape[1],
                    name_of_callable(
                        self.expert_ensembles[k].patch_normalisation),
                    self.expert_ensembles[k].context_shape[0],
                    self.expert_ensembles[k].context_shape[1],
                    self.expert_ensembles[k].cosine_mask,
                    name_of_callable(self.shape_models[k]),
                    self.shape_models[k].model.n_components,
                    self.shape_models[k].n_global_parameters))
        scales_info = '\n'.join(scales_info)

        cls_str = r"""{class_title}
 - Images scaled to diagonal: {diagonal:.2f}
 - Scales: {scales}
{scales_info}
""".format(class_title=self._str_title,
           diagonal=diagonal,
           scales=self.scales,
           scales_info=scales_info)
        return cls_str
Example #9
    def decorator(labelling_method):
        # Shadowing parent scope variables inside a nested function
        # kills the scope of the parent variable, so we need a unique alias
        # for the group name
        gl = (group_label if group_label is not None else
              name_of_callable(labelling_method))
        # Duck type group label onto method itself
        labelling_method.group_label = gl
        # Set up the global docs
        labelling_method.__doc__ += _labeller_docs

        @wraps(labelling_method)
        def wrapper(x, return_mapping=False):
            from menpo.shape import PointCloud
            # Accepts LandmarkGroup, PointCloud or ndarray
            if isinstance(x, np.ndarray):
                x = PointCloud(x, copy=False)

            if isinstance(x, PointCloud):
                new_pcloud, mapping = labelling_method(x)
                # This parameter is only provided for internal use so that
                # other labellers can piggyback off one another
                if return_mapping:
                    return new_pcloud, mapping
                else:
                    return new_pcloud
            if isinstance(x, LandmarkGroup):
                new_pcloud, mapping = labelling_method(x.lms)
                return LandmarkGroup.init_from_indices_mapping(
                    new_pcloud, mapping)

        return wrapper
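The wrapper above means a decorated labelling method accepts either a raw (n_points, 2) ndarray or a PointCloud and hands back a relabelled PointCloud (plus the index mapping on request). A self-contained sketch of that calling convention, applying the same wrapping logic by hand to a do-nothing labelling method; the names and the mapping format are made up for illustration:

import numpy as np
from functools import wraps
from menpo.shape import PointCloud

def identity_labeller(pcloud):
    # Trivial labelling method: keep every point, identity mapping.
    return pcloud, {'all': np.arange(pcloud.n_points)}

def wrap(labelling_method):
    @wraps(labelling_method)
    def wrapper(x, return_mapping=False):
        if isinstance(x, np.ndarray):
            x = PointCloud(x, copy=False)
        new_pcloud, mapping = labelling_method(x)
        return (new_pcloud, mapping) if return_mapping else new_pcloud
    return wrapper

some_labeller = wrap(identity_labeller)
relabelled = some_labeller(np.zeros((68, 2)))                      # PointCloud
relabelled, mapping = some_labeller(np.zeros((68, 2)), return_mapping=True)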
Example #10
    def decorator(labelling_method):
        # Shadowing parent scope variables inside a nested function
        # kills the scope of the parent variable, so we need a unique alias
        # for the group name
        gl = (group_label if group_label is not None
              else name_of_callable(labelling_method))
        # Duck type group label onto method itself
        labelling_method.group_label = gl
        # Set up the global docs
        labelling_method.__doc__ += _labeller_docs

        @wraps(labelling_method)
        def wrapper(x, return_mapping=False):
            from menpo.shape import PointCloud
            # Accepts LandmarkGroup, PointCloud or ndarray
            if isinstance(x, np.ndarray):
                x = PointCloud(x, copy=False)

            if isinstance(x, PointCloud):
                new_pcloud, mapping = labelling_method(x)
                # This parameter is only provided for internal use so that
                # other labellers can piggyback off one another
                if return_mapping:
                    return new_pcloud, mapping
                else:
                    return new_pcloud
            if isinstance(x, LandmarkGroup):
                new_pcloud, mapping = labelling_method(x.lms)
                return LandmarkGroup.init_from_indices_mapping(new_pcloud, 
                                                               mapping)
        return wrapper
Example #11
 def __str__(self):
     incremental_str = (" - Can be incrementally updated."
                        if self.is_incremental else " - Cannot be "
                        "incrementally updated.")
     svd_str = (" - # SVD components:        {}".format(self.n_components)
                if self.n_components is not None else " - No "
                "SVD used.")
     _Q_sparse = "scipy.sparse" if self.sparse else "numpy.array"
     q_str = " - Q is stored as {} with {} precision".format(
         _Q_sparse, name_of_callable(self.dtype))
     mode_str = "concatenated" if self.mode == "concatenation" else "subtracted"
     str_out = ("Gaussian MRF Model \n"
                " - {}\n"
                " - The data of the vertexes of each edge are {}.\n"
                "{}\n"
                " - # variables (vertexes):  {}\n"
                " - # features per variable: {}\n"
                " - # features in total:     {}\n"
                "{}\n"
                " - # samples:               {}\n"
                "{}\n".format(
                    self.graph.__str__(),
                    mode_str,
                    q_str,
                    self.graph.n_vertices,
                    self.n_features_per_vertex,
                    self.n_features,
                    svd_str,
                    self.n_samples,
                    incremental_str,
                ))
     return str_out
Example #12
    def decorator(labelling_method):
        # Shadowing parent scope variables inside a nested function
        # kills the scope of the parent variable, so we need a unique alias
        # for the group name
        gl = (group_label if group_label is not None else
              name_of_callable(labelling_method))
        # Duck type group label onto method itself
        labelling_method.group_label = gl
        # Set up the global docs
        labelling_method.__doc__ += _labeller_docs

        @wraps(labelling_method)
        def wrapper(x, return_mapping=False):
            from menpo.shape import PointCloud
            # Accepts PointCloud subclass or ndarray
            if isinstance(x, np.ndarray):
                x = PointCloud(x, copy=False)

            new_pcloud, mapping = labelling_method(x)
            if return_mapping:
                return new_pcloud, mapping
            else:
                return new_pcloud

        return wrapper
Example #13
 def __str__(self):
     incremental_str = (' - Can be incrementally updated.' if
                        self.is_incremental else ' - Cannot be '
                                                 'incrementally updated.')
     svd_str = (' - # SVD components:        {}'.format(self.n_components)
                if self.n_components is not None else ' - No ' 'SVD used.')
     _Q_sparse = 'scipy.sparse' if self.sparse else 'numpy.array'
     q_str = ' - Q is stored as {} with {} precision'.format(
         _Q_sparse, name_of_callable(self.dtype))
     mode_str = ('concatenated' if self.mode == 'concatenation' else
                 'subtracted')
     str_out = 'Gaussian MRF Model \n' \
               ' - {}\n' \
               ' - The data of the vertexes of each edge are {}.\n' \
               '{}\n' \
               ' - # variables (vertexes):  {}\n' \
               ' - # features per variable: {}\n' \
               ' - # features in total:     {}\n' \
               '{}\n' \
               ' - # samples:               {}\n' \
               '{}\n'.format(
         self.graph.__str__(), mode_str, q_str, self.graph.n_vertices,
         self.n_features_per_vertex, self.n_features, svd_str,
         self.n_samples, incremental_str)
     return str_out
Example #14
    def __str__(self):
        if self.diagonal is not None:
            diagonal = self.diagonal
        else:
            y, x = self.reference_shape.range()
            diagonal = np.sqrt(x ** 2 + y ** 2)

        # Compute scale info strings
        scales_info = []
        lvl_str_tmplt = r"""   - Scale {}
     - Holistic feature: {}
     - Ensemble of experts class: {}
       - {} experts
       - {} class
       - Patch shape: {} x {}
       - Patch normalisation: {}
       - Context shape: {} x {}
       - Cosine mask: {}
     - Shape model class: {}
       - {} shape components
       - {} similarity transform parameters"""
        for k, s in enumerate(self.scales):
            scales_info.append(lvl_str_tmplt.format(
                    s, name_of_callable(self.holistic_features[k]),
                    name_of_callable(self.expert_ensemble_cls[k]),
                    self.expert_ensembles[k].n_experts,
                    name_of_callable(self.expert_ensembles[k]._icf),
                    self.expert_ensembles[k].patch_shape[0],
                    self.expert_ensembles[k].patch_shape[1],
                    name_of_callable(self.expert_ensembles[k].patch_normalisation),
                    self.expert_ensembles[k].context_shape[0],
                    self.expert_ensembles[k].context_shape[1],
                    self.expert_ensembles[k].cosine_mask,
                    name_of_callable(self.shape_models[k]),
                    self.shape_models[k].model.n_components,
                    self.shape_models[k].n_global_parameters))
        scales_info = '\n'.join(scales_info)

        cls_str = r"""{class_title}
 - Images scaled to diagonal: {diagonal:.2f}
 - Scales: {scales}
{scales_info}
""".format(class_title=self._str_title,
           diagonal=diagonal,
           scales=self.scales,
           scales_info=scales_info)
        return cls_str
Example #15
    def __str__(self):
        if self.diagonal is not None:
            diagonal = self.diagonal
        else:
            y, x = self.reference_shape.range()
            diagonal = np.sqrt(x ** 2 + y ** 2)
        is_custom_perturb_func = self._perturb_from_gt_bounding_box != noisy_shape_from_bounding_box
        regressor_cls = self.algorithms[0]._regressor_cls

        # Compute scale info strings
        scales_info = []
        lvl_str_tmplt = r"""  - Scale {}
   - {} iterations
   - Patch shape: {}
   - Holistic feature: {}
   - Patch feature: {}"""
        for k, s in enumerate(self.scales):
            scales_info.append(
                lvl_str_tmplt.format(
                    s,
                    self.n_iterations[k],
                    self.patch_shape[k],
                    name_of_callable(self.holistic_features[k]),
                    name_of_callable(self.patch_features[k]),
                )
            )
        scales_info = "\n".join(scales_info)

        cls_str = r"""Supervised Descent Method
 - Regression performed using the {reg_alg} algorithm
   - Regression class: {reg_cls}
 - Perturbations generated per shape: {n_perturbations}
 - Images scaled to diagonal: {diagonal:.2f}
 - Custom perturbation scheme used: {is_custom_perturb_func}
 - Scales: {scales}
{scales_info}
""".format(
            reg_alg=name_of_callable(self._sd_algorithm_cls),
            reg_cls=name_of_callable(regressor_cls),
            n_perturbations=self.n_perturbations,
            diagonal=diagonal,
            is_custom_perturb_func=is_custom_perturb_func,
            scales=self.scales,
            scales_info=scales_info,
        )
        return cls_str
Example #16
def _aam_str(aam):
    if aam.diagonal is not None:
        diagonal = aam.diagonal
    else:
        y, x = aam.reference_shape.range()
        diagonal = np.sqrt(x ** 2 + y ** 2)

    # Compute scale info strings
    scales_info = []
    lvl_str_tmplt = r"""  - Scale {}
   - Holistic feature: {}
   - Appearance model class: {}
   - {} appearance components
   - Shape model class: {}
   - {} shape components"""
    for k, s in enumerate(aam.scales):
        scales_info.append(lvl_str_tmplt.format(
            s, name_of_callable(aam.holistic_features[k]),
            name_of_callable(aam.appearance_models[k]),
            aam.appearance_models[k].n_components,
            name_of_callable(aam.shape_models[k]),
            aam.shape_models[k].model.n_components))
    # Patch based AAM
    if hasattr(aam, 'patch_shape'):
        for k in range(len(scales_info)):
            scales_info[k] += '\n   - Patch shape: {}'.format(
                aam.patch_shape[k])
    scales_info = '\n'.join(scales_info)

    if aam.transform is not None:
        transform_str = 'Images warped with {} transform'.format(
            name_of_callable(aam.transform))
    else:
        transform_str = 'No image warping performed'

    cls_str = r"""{class_title}
 - Images scaled to diagonal: {diagonal:.2f}
 - {transform}
 - Scales: {scales}
{scales_info}
""".format(class_title=aam._str_title,
           transform=transform_str,
           diagonal=diagonal,
           scales=aam.scales,
           scales_info=scales_info)
    return cls_str
Example #17
    def __str__(self):
        cls_str = r"""Ensemble of Correlation Filter Experts
 - {n_experts} experts
 - {icf_cls} class
 - Patch shape: {patch_height} x {patch_width}
 - Patch normalisation: {patch_norm}
 - Context shape: {context_height} x {context_width}
 - Cosine mask: {cosine_mask}""".format(
                n_experts=self.n_experts,
                icf_cls=name_of_callable(self._icf),
                patch_height=self.patch_shape[0],
                patch_width=self.patch_shape[1],
                patch_norm=name_of_callable(self.patch_normalisation),
                context_height=self.context_shape[0],
                context_width=self.context_shape[1],
                cosine_mask=self.cosine_mask)
        return cls_str
Example #18
    def __str__(self):
        cls_str = r"""Ensemble of Correlation Filter Experts
 - {n_experts} experts
 - {icf_cls} class
 - Patch shape: {patch_height} x {patch_width}
 - Patch normalisation: {patch_norm}
 - Context shape: {context_height} x {context_width}
 - Cosine mask: {cosine_mask}""".format(n_experts=self.n_experts,
                                        icf_cls=name_of_callable(self._icf),
                                        patch_height=self.patch_shape[0],
                                        patch_width=self.patch_shape[1],
                                        patch_norm=name_of_callable(
                                            self.patch_normalisation),
                                        context_height=self.context_shape[0],
                                        context_width=self.context_shape[1],
                                        cosine_mask=self.cosine_mask)
        return cls_str
Example #19
    def __str__(self):
        if self.diagonal is not None:
            diagonal = self.diagonal
        else:
            y, x = self.reference_shape.range()
            diagonal = np.sqrt(x ** 2 + y ** 2)
        is_custom_perturb_func = (self._perturb_from_gt_bounding_box !=
                                  noisy_shape_from_bounding_box)
        if is_custom_perturb_func:
            is_custom_perturb_func = name_of_callable(
                    self._perturb_from_gt_bounding_box)
        regressor_cls = self.algorithms[0]._regressor_cls

        # Compute scale info strings
        scales_info = []
        lvl_str_tmplt = r"""   - Scale {}
     - {} iterations
     - Patch shape: {}
     - Holistic feature: {}
     - Patch feature: {}"""
        for k, s in enumerate(self.scales):
            scales_info.append(lvl_str_tmplt.format(
                s, self.n_iterations[k], self.patch_shape[k],
                name_of_callable(self.holistic_features[k]),
                name_of_callable(self.patch_features[k])))
        scales_info = '\n'.join(scales_info)

        cls_str = r"""Supervised Descent Method
 - Regression performed using the {reg_alg} algorithm
   - Regression class: {reg_cls}
 - Perturbations generated per shape: {n_perturbations}
 - Images scaled to diagonal: {diagonal:.2f}
 - Custom perturbation scheme used: {is_custom_perturb_func}
 - Scales: {scales}
{scales_info}
""".format(
            reg_alg=name_of_callable(self._sd_algorithm_cls[0]),
            reg_cls=name_of_callable(regressor_cls),
            n_perturbations=self.n_perturbations,
            diagonal=diagonal,
            is_custom_perturb_func=is_custom_perturb_func,
            scales=self.scales,
            scales_info=scales_info)
        return cls_str
Example #20
def _atm_str(atm):
    if atm.diagonal is not None:
        diagonal = atm.diagonal
    else:
        y, x = atm.reference_shape.range()
        diagonal = np.sqrt(x ** 2 + y ** 2)

    # Compute scale info strings
    scales_info = []
    lvl_str_tmplt = r"""  - Scale {}
   - Holistic feature: {}
   - Template shape: {}
   - Shape model class: {}
   - {} shape components"""
    for k, s in enumerate(atm.scales):
        scales_info.append(
            lvl_str_tmplt.format(
                s,
                name_of_callable(atm.holistic_features[k]),
                atm.warped_templates[k].shape,
                name_of_callable(atm.shape_models[k]),
                atm.shape_models[k].model.n_components,
            )
        )
    # Patch based ATM
    if hasattr(atm, "patch_shape"):
        for k in range(len(scales_info)):
            scales_info[k] += "\n   - Patch shape: {}".format(atm.patch_shape[k])
    scales_info = "\n".join(scales_info)

    cls_str = r"""{class_title}
 - Images warped with {transform} transform
 - Images scaled to diagonal: {diagonal:.2f}
 - Scales: {scales}
{scales_info}
""".format(
        class_title=atm._str_title,
        transform=name_of_callable(atm.transform),
        diagonal=diagonal,
        scales=atm.scales,
        scales_info=scales_info,
    )
    return cls_str
Example #21
    def __str__(self):
        if self.diagonal is not None:
            diagonal = self.diagonal
        else:
            y, x = self.reference_shape.range()
            diagonal = np.sqrt(x ** 2 + y ** 2)

        # Compute scale info strings
        scales_info = []
        lvl_str_tmplt = r"""   - Scale {0}
     - Cascade depth: {1}
     - Depth per tree: {2}
     - Trees per cascade level: {3}
     - Regularisation parameter: {4:.1f}
     - Feature pool of size {5} and padding {6:.1f}
     - Lambda: {7:.1f}
     - {8} split tests
     - Perturbations generated per shape: {9}
     - Total perturbations generated: {10}"""
        for k, s in enumerate(self.scales):
            scales_info.append(lvl_str_tmplt.format(
                    s,
                    self._dlib_options_templates[k].cascade_depth,
                    self._dlib_options_templates[k].tree_depth,
                    self._dlib_options_templates[k].num_trees_per_cascade_level,
                    self._dlib_options_templates[k].nu,
                    self._dlib_options_templates[k].feature_pool_size,
                    self._dlib_options_templates[k].feature_pool_region_padding,
                    self._dlib_options_templates[k].lambda_param,
                    self._dlib_options_templates[k].num_test_splits,
                    self._dlib_options_templates[k].oversampling_amount,
                    self._dlib_options_templates[k].oversampling_amount *
                    self.n_perturbations))
        scales_info = '\n'.join(scales_info)

        is_custom_perturb_func = (self._perturb_from_gt_bounding_box !=
                                  noisy_shape_from_bounding_box)
        if is_custom_perturb_func:
            is_custom_perturb_func = name_of_callable(
                    self._perturb_from_gt_bounding_box)

        cls_str = r"""{class_title}
 - Images scaled to diagonal: {diagonal:.2f}
 - Perturbations generated per shape: {n_perturbations}
 - Custom perturbation scheme used: {is_custom_perturb_func}
 - Scales: {scales}
{scales_info}
""".format(class_title='Ensemble of Regression Trees',
           diagonal=diagonal,
           n_perturbations=self.n_perturbations,
           is_custom_perturb_func=is_custom_perturb_func,
           scales=self.scales,
           scales_info=scales_info)
        return cls_str
Example #22
    def __str__(self):
        if self.diagonal is not None:
            diagonal = self.diagonal
        else:
            y, x = self.reference_shape.range()
            diagonal = np.sqrt(x**2 + y**2)

        # Compute scale info strings
        scales_info = []
        lvl_str_tmplt = r"""   - Scale {0}
     - Cascade depth: {1}
     - Depth per tree: {2}
     - Trees per cascade level: {3}
     - Regularisation parameter: {4:.1f}
     - Feature pool of size {5} and padding {6:.1f}
     - Lambda: {7:.1f}
     - {8} split tests
     - Perturbations generated per shape: {9}
     - Total perturbations generated: {10}"""
        for k, s in enumerate(self.scales):
            scales_info.append(
                lvl_str_tmplt.format(
                    s,
                    self._dlib_options_templates[k].cascade_depth,
                    self._dlib_options_templates[k].tree_depth,
                    self._dlib_options_templates[k].num_trees_per_cascade_level,
                    self._dlib_options_templates[k].nu,
                    self._dlib_options_templates[k].feature_pool_size,
                    self._dlib_options_templates[k].feature_pool_region_padding,
                    self._dlib_options_templates[k].lambda_param,
                    self._dlib_options_templates[k].num_test_splits,
                    self._dlib_options_templates[k].oversampling_amount,
                    self._dlib_options_templates[k].oversampling_amount *
                    self.n_perturbations))
        scales_info = '\n'.join(scales_info)

        is_custom_perturb_func = (self._perturb_from_gt_bounding_box !=
                                  noisy_shape_from_bounding_box)
        if is_custom_perturb_func:
            is_custom_perturb_func = name_of_callable(
                self._perturb_from_gt_bounding_box)

        cls_str = r"""{class_title}
 - Images scaled to diagonal: {diagonal:.2f}
 - Perturbations generated per shape: {n_perturbations}
 - Custom perturbation scheme used: {is_custom_perturb_func}
 - Scales: {scales}
{scales_info}
""".format(class_title='Ensemble of Regression Trees',
           diagonal=diagonal,
           n_perturbations=self.n_perturbations,
           is_custom_perturb_func=is_custom_perturb_func,
           scales=self.scales,
           scales_info=scales_info)
        return cls_str
Example #23
    def __str__(self):
        cls_str = r"""{}
 - Shape model class: {}
   - {} vertices, {} triangles
   - {} shape components
   - Instance class: {}
 - Texture model class: {}
   - {} texture components
   - Diagonal of {} pixels
   - Features function is {}
   - {} channels
 - Sparse landmarks class: {}
   - {} landmarks
""".format(self._str_title, name_of_callable(self.shape_model),
           self.n_vertices, self.n_triangles, self.shape_model.n_components,
           name_of_callable(self.shape_model.template_instance),
           name_of_callable(self.texture_model),
           self.texture_model.n_components, self.diagonal,
           name_of_callable(self.holistic_features), self.n_channels,
           name_of_callable(self.landmarks), self.landmarks.n_points)
        return cls_str
Example #24
    def __str__(self):
        r"""
        """
        if self.diagonal is not None:
            diagonal = self.diagonal
        else:
            y, x = self.reference_shape.range()
            diagonal = np.sqrt(x**2 + y**2)

        # Compute scale info strings
        scales_info = []
        lvl_str_tmplt = r"""  - Scale {}
   - Holistic feature: {}
   - Shape model class: {}
   - {} shape components
   - Expert ensemble class: {}
    - {} experts
    - Patch shape: {}"""
        for k, s in enumerate(self.scales):
            scales_info.append(
                lvl_str_tmplt.format(
                    s,
                    name_of_callable(self.holistic_features[k]),
                    name_of_callable(self.shape_models[k]),
                    self.shape_models[k].model.n_components,
                    name_of_callable(self.expert_ensembles[k]),
                    self.expert_ensembles[k].n_experts,
                    self.expert_ensembles[k].patch_shape,
                ))
        scales_info = '\n'.join(scales_info)

        cls_str = r"""{class_title}
 - Images scaled to diagonal: {diagonal:.2f}
 - Scales: {scales}
{scales_info}
        """.format(class_title=self._str_title,
                   diagonal=diagonal,
                   scales=self.scales,
                   scales_info=scales_info)
        return cls_str
Example #25
def _atm_str(atm):
    if atm.diagonal is not None:
        diagonal = atm.diagonal
    else:
        y, x = atm.reference_shape.range()
        diagonal = np.sqrt(x ** 2 + y ** 2)

    # Compute scale info strings
    scales_info = []
    lvl_str_tmplt = r"""   - Scale {}
     - Holistic feature: {}
     - Template shape: {}
     - Shape model class: {}
       - {} shape components
       - {} similarity transform parameters"""
    for k, s in enumerate(atm.scales):
        scales_info.append(lvl_str_tmplt.format(
            s, name_of_callable(atm.holistic_features[k]),
            atm.warped_templates[k].shape,
            name_of_callable(atm.shape_models[k]),
            atm.shape_models[k].model.n_components,
            atm.shape_models[k].n_global_parameters))
    # Patch based ATM
    if hasattr(atm, 'patch_shape'):
        for k in range(len(scales_info)):
            scales_info[k] += '\n     - Patch shape: {}'.format(
                atm.patch_shape[k])
    scales_info = '\n'.join(scales_info)

    cls_str = r"""{class_title}
 - Images warped with {transform} transform
 - Images scaled to diagonal: {diagonal:.2f}
 - Scales: {scales}
{scales_info}
""".format(class_title=atm._str_title,
           transform=name_of_callable(atm.transform),
           diagonal=diagonal,
           scales=atm.scales,
           scales_info=scales_info)
    return cls_str
Example #26
def raise_costs_warning(cls):
    r"""
    Function that raises a warning when the costs for the selected
    optimisation (fitting) class cannot be computed.

    Parameters
    ----------
    cls : `class`
        The optimisation (fitting) class.
    """
    cls_name = name_of_callable(cls)
    warnings.warn("costs cannot be computed for {}".format(cls_name),
                  MenpoFitCostsWarning)
Example #27
def raise_costs_warning(cls):
    r"""
    Function that raises a warning when the costs for the selected
    optimisation (fitting) class cannot be computed.

    Parameters
    ----------
    cls : `class`
        The optimisation (fitting) class.
    """
    cls_name = name_of_callable(cls)
    warnings.warn("costs cannot be computed for {}".format(cls_name),
                  MenpoFitCostsWarning)
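A minimal usage sketch; the fitting class below is a placeholder, and only the call pattern and the message format come from the snippet above:

class MyFittingAlgorithm(object):
    pass

# Issues a MenpoFitCostsWarning reading:
#   "costs cannot be computed for MyFittingAlgorithm"
raise_costs_warning(MyFittingAlgorithm)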
Example #28
    def __str__(self):
        r"""
        """
        if self.diagonal is not None:
            diagonal = self.diagonal
        else:
            y, x = self.reference_shape.range()
            diagonal = np.sqrt(x ** 2 + y ** 2)

        # Compute scale info strings
        scales_info = []
        lvl_str_tmplt = r"""  - Scale {}
   - Holistic feature: {}
   - Shape model class: {}
   - {} shape components
   - Expert ensemble class: {}
    - {} experts
    - Patch shape: {}"""
        for k, s in enumerate(self.scales):
            scales_info.append(lvl_str_tmplt.format(
                s, name_of_callable(self.holistic_features[k]),
                name_of_callable(self.shape_models[k]),
                self.shape_models[k].model.n_components,
                name_of_callable(self.expert_ensembles[k]),
                self.expert_ensembles[k].n_experts,
                self.expert_ensembles[k].patch_shape,
            ))
        scales_info = '\n'.join(scales_info)

        cls_str = r"""{class_title}
 - Images scaled to diagonal: {diagonal:.2f}
 - Scales: {scales}
{scales_info}
        """.format(class_title=self._str_title,
                   diagonal=diagonal,
                   scales=self.scales,
                   scales_info=scales_info)
        return cls_str
Example #29
def check_model(model, cls):
    r"""
    Function that checks whether the provided object is an instance of the
    provided base `class`.

    Parameters
    ----------
    model : `object`
        The model instance to check.
    cls : `class`
        The required base class.

    Raises
    ------
    ValueError
        Model must be a {cls} instance.
    """
    if not isinstance(model, cls):
        raise ValueError('Model must be a {} instance.'.format(
                name_of_callable(cls)))
Example #30
def check_model(model, cls):
    r"""
    Function that checks whether the provided object is an instance of the
    provided base `class`.

    Parameters
    ----------
    model : `object`
        The model instance to check.
    cls : `class`
        The required base class.

    Raises
    ------
    ValueError
        Model must be a {cls} instance.
    """
    if not isinstance(model, cls):
        raise ValueError('Model must be a {} instance.'.format(
            name_of_callable(cls)))
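A minimal usage sketch with placeholder classes, showing both the silent pass and the ValueError path:

class BaseModel(object):
    pass

class MyModel(BaseModel):
    pass

check_model(MyModel(), BaseModel)   # instance of the base class: no error
check_model(object(), BaseModel)    # raises ValueError: Model must be a BaseModel instance.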
Example #31
    def __str__(self):
        # Compute scale info strings
        scales_info = []
        lvl_str_tmplt = r"""  - Scale {}
     - {} active shape components
     - {} similarity transform components
     - {} active appearance components"""
        for k, s in enumerate(self.scales):
            scales_info.append(lvl_str_tmplt.format(
                    s,
                    self._model.shape_models[k].model.n_active_components,
                    self._model.shape_models[k].n_global_parameters,
                    self._model.appearance_models[k].n_active_components))
        scales_info = '\n'.join(scales_info)

        cls_str = r"""{class_title}
  - Scales: {scales}
{scales_info}
    """.format(class_title=name_of_callable(self.algorithms[0]),
               scales=self.scales,
               scales_info=scales_info)
        return self._model.__str__() + cls_str
Example #32
    def decorator(labelling_method):
        # Shadowing parent scope variables inside a nested function
        # kills the scope of the parent variable, so we need a unique alias
        # for the group name
        gl = (group_label if group_label is not None
              else name_of_callable(labelling_method))
        # Duck type group label onto method itself
        labelling_method.group_label = gl
        # Set up the global docs
        labelling_method.__doc__ += _labeller_docs

        @wraps(labelling_method)
        def wrapper(x, return_mapping=False):
            from menpo.shape import PointCloud
            # Accepts PointCloud subclass or ndarray
            if isinstance(x, np.ndarray):
                x = PointCloud(x, copy=False)

            new_pcloud, mapping = labelling_method(x)
            if return_mapping:
                return new_pcloud, mapping
            else:
                return new_pcloud
        return wrapper