Code example #1
    def _precompute(self):
        # Import multivariate normal distribution from scipy
        global multivariate_normal
        if multivariate_normal is None:
            from scipy.stats import multivariate_normal  # expensive

        # Build grid associated to size of the search space
        search_shape = self.expert_ensemble.search_shape
        self.search_grid = build_grid(search_shape)

        # set rho2
        self.rho2 = self.transform.model.noise_variance()

        # Compute shape model prior
        sim_prior = np.zeros((4, ))
        pdm_prior = self.rho2 / self.transform.model.eigenvalues
        self.rho2_inv_L = np.hstack((sim_prior, pdm_prior))

        # Compute Jacobian
        J = np.rollaxis(self.transform.d_dp(None), -1, 1)
        self.J = J.reshape((-1, J.shape[-1]))
        # Compute Hessian (J^T J)
        self.JJ = self.J.T.dot(self.J)
        # Compute Jacobian pseudo-inverse
        self.pinv_J = np.linalg.solve(self.JJ, self.J.T)
        # Compute inverse of the prior-regularised Hessian
        self.inv_JJ_prior = np.linalg.inv(self.JJ + np.diag(self.rho2_inv_L))
Code example #2
File: gd.py Project: KeeganRen/menpofit
    def _precompute(self):
        # Import multivariate normal distribution from scipy
        global multivariate_normal
        if multivariate_normal is None:
            from scipy.stats import multivariate_normal  # expensive

        # Build grid associated to size of the search space
        search_shape = self.expert_ensemble.search_shape
        self.search_grid = build_grid(search_shape)

        # set rho2
        self.rho2 = self.transform.model.noise_variance()

        # Compute shape model prior
        sim_prior = np.zeros((4,))
        pdm_prior = self.rho2 / self.transform.model.eigenvalues
        self.rho2_inv_L = np.hstack((sim_prior, pdm_prior))

        # Compute Jacobian
        J = np.rollaxis(self.transform.d_dp(None), -1, 1)
        self.J = J.reshape((-1, J.shape[-1]))
        # Compute Hessian (J^T J)
        self.JJ = self.J.T.dot(self.J)
        # Compute Jacobian pseudo-inverse
        self.pinv_J = np.linalg.solve(self.JJ, self.J.T)
        # Compute inverse of the prior-regularised Hessian
        self.inv_JJ_prior = np.linalg.inv(self.JJ + np.diag(self.rho2_inv_L))
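The quantities cached by _precompute feed a MAP Gauss-Newton style update later in the fitting loop (compare the run method in code example #8 below). A minimal sketch of one such update, assuming `error` is the flattened mean-shift residual and `p` the current shape-parameter vector (hypothetical names for illustration, not attributes of the class above):

import numpy as np


def map_gauss_newton_step(J, inv_JJ_prior, rho2_inv_L, p, error):
    # Prior-weighted gradient: rho2_inv_L * p penalises deviations from the
    # shape model, while J.T.dot(error) pulls towards the mean-shift target.
    Je = rho2_inv_L * p - J.T.dot(error)
    # Apply the precomputed (J^T J + diag(rho2_inv_L))^-1 to get the increment.
    dp = -inv_JJ_prior.dot(Je)
    return p + dp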
Code example #3
    def __init__(self, expert_ensemble, shape_model, kernel_covariance=10,
                 kernel_idealmap=100, confidence_gama=0.5, opt=None,
                 eps=10**-5, imgSize=256):
        self.kernel_covariance = kernel_covariance
        self.kernel_idealmap = kernel_idealmap
        self.confidence_gama = confidence_gama
        self.opt = opt
        mvnideal = multivariate_normal(mean=np.zeros(2), cov=self.kernel_idealmap)
        # Build the ideal response on a 2*imgSize grid (rather than a
        # hard-coded 256) so it matches the padded buffer rps_zeros below
        self.ideal_response = mvnideal.pdf(build_grid((2 * imgSize, 2 * imgSize)))
        self.rps_zeros = np.zeros((1, shape_model.n_points, 2 * imgSize, 2 * imgSize))
        # self.ideal_response = mvnideal.pdf(build_grid((2 * image.shape[0], 2 * image.shape[1])))
        super(RegularisedLandmarkMeanShift, self).__init__(
            expert_ensemble=expert_ensemble, shape_model=shape_model, eps=eps)
Code example #4
File: ensemble.py Project: jabooth/menpofit
def generate_gaussian_response(patch_shape, response_covariance):
    r"""
    Method that generates a Gaussian response (probability density function)
    given the desired shape and a covariance value.

    Parameters
    ----------
    patch_shape : (`int`, `int`)
        The shape of the response.
    response_covariance : `int`
        The covariance of the generated Gaussian response.

    Returns
    -------
    pdf : ``(patch_height, patch_width)`` `ndarray`
        The generated response.
    """
    grid = build_grid(patch_shape)
    mvn = multivariate_normal(mean=np.zeros(2), cov=response_covariance)
    return mvn.pdf(grid)
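A quick usage sketch for the function above. Since build_grid lives inside menpofit, the snippet below uses a local stand-in with the assumed behaviour (an (h, w, 2) array of offsets centred on zero); the odd patch size keeps the peak exactly at the centre pixel.

import numpy as np
from scipy.stats import multivariate_normal


def build_grid(shape):
    # Stand-in for menpofit's helper: offsets centred on zero, shape (h, w, 2).
    h, w = shape
    rows = np.arange(h) - (h - 1) / 2.0
    cols = np.arange(w) - (w - 1) / 2.0
    return np.stack(np.meshgrid(rows, cols, indexing='ij'), axis=-1)


def generate_gaussian_response(patch_shape, response_covariance):
    grid = build_grid(patch_shape)
    mvn = multivariate_normal(mean=np.zeros(2), cov=response_covariance)
    return mvn.pdf(grid)


response = generate_gaussian_response((17, 17), response_covariance=3)
print(response.shape)     # (17, 17)
print(response.argmax())  # 144, i.e. row 8 / column 8, the centre of the patch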
Code example #5
def generate_gaussian_response(patch_shape, response_covariance):
    r"""
    Method that generates a Gaussian response (probability density function)
    given the desired shape and a covariance value.

    Parameters
    ----------
    patch_shape : (`int`, `int`)
        The shape of the response.
    response_covariance : `int`
        The covariance of the generated Gaussian response.

    Returns
    -------
    pdf : ``(patch_height, patch_width)`` `ndarray`
        The generated response.
    """
    grid = build_grid(patch_shape)
    mvn = multivariate_normal(mean=np.zeros(2), cov=response_covariance)
    return mvn.pdf(grid)
Code example #6
File: algorithm.py Project: zk496007331/menpofit
    def _precompute_clm(self):
        global multivariate_normal
        if multivariate_normal is None:
            from scipy.stats import multivariate_normal  # expensive

        # set inverse rho2
        self._inv_rho2 = self.pdm.model.inverse_noise_variance()

        # compute Gaussian-KDE grid
        self._sampling_grid = build_grid(self.patch_shape)
        mean = np.zeros(self.transform.n_dims)
        response_covariance = self.response_covariance * self._inv_rho2
        mvn = multivariate_normal(mean=mean, cov=response_covariance)
        self._kernel_grid = mvn.pdf(self._sampling_grid)

        # compute CLM jacobian
        j_clm = np.rollaxis(self.pdm.d_dp(None), -1, 1)
        j_clm = j_clm.reshape((-1, j_clm.shape[-1]))
        self._j_clm = self._inv_rho2 * j_clm

        # compute CLM hessian
        self._h_clm = self._j_clm.T.dot(j_clm)
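For orientation, a condensed sketch of how the cached _kernel_grid, _j_clm and _h_clm terms are typically consumed in one regularised mean-shift iteration (mirroring the weighted loop in code example #8 below; the argument names and shapes are assumptions for illustration, not part of this class):

import numpy as np


def rlms_step(patch_responses, candidate_landmarks, target_vector,
              kernel_grid, j_clm, h_clm):
    # patch_responses: (n_points, h, w) expert response maps
    # candidate_landmarks: (n_points, h, w, 2) grid positions around the target
    # target_vector: (2 * n_points,) current target positions, flattened
    # Smooth the responses with the Gaussian KDE kernel and normalise them
    patch_kernels = patch_responses * kernel_grid
    patch_kernels /= np.sum(patch_kernels, axis=(-2, -1))[..., None, None]

    # Kernel-weighted mean of the candidate landmark positions (mean shift)
    mean_shift_target = np.sum(patch_kernels[..., None] * candidate_landmarks,
                               axis=(-3, -2))

    # Residual towards the mean-shift target and Gauss-Newton increment
    error = mean_shift_target.ravel() - target_vector
    return np.linalg.solve(h_clm, j_clm.T.dot(error))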
Code example #7
File: ensemble.py Project: HaoyangWang/menpofit
def generate_gaussian_response(patch_shape, response_covariance):
    r"""
    """
    grid = build_grid(patch_shape)
    mvn = multivariate_normal(mean=np.zeros(2), cov=response_covariance)
    return mvn.pdf(grid)
Code example #8
    def run(self,
            image,
            initial_shape,
            gt_shape=None,
            max_iters=20,
            return_costs=False,
            map_inference=True):
        r"""
        Execute the optimization algorithm.

        Parameters
        ----------
        image : `menpo.image.Image`
            The input test image.
        initial_shape : `menpo.shape.PointCloud`
            The initial shape from which the optimization will start.
        gt_shape : `menpo.shape.PointCloud` or ``None``, optional
            The ground truth shape of the image. It is only needed so that it
            can be attached to the returned optimization result object, which
            can then compute the fitting error.
        max_iters : `int`, optional
            The maximum number of iterations. Note that the algorithm may
            converge, and thus stop, earlier.
        return_costs : `bool`, optional
            If ``True``, then the cost function values will be computed
            during the fitting procedure. Then these cost values will be
            assigned to the returned `fitting_result`. *Note that this
            argument currently has no effect and will raise a warning if set
            to ``True``. This is because it is not possible to evaluate the
            cost function of this algorithm.*
        map_inference : `bool`, optional
            If ``True``, then the solution will be given after performing MAP
            inference.

        Returns
        -------
        fitting_result : :map:`ParametricIterativeResult`
            The parametric iterative fitting result.
        """
        # costs warning
        if return_costs:
            raise_costs_warning(self)

        patch_size = self.opt['ratio1']
        patch_size2 = self.opt['ratio2']
        search_ratio = ([patch_size] * 3 +
                        [patch_size2] * (self.opt['numIter'] - 3))

        numLandmarks = image.rspmap_data.shape[1]

        timeFitStart = time()
        # pad responsemaps with zeros
        padH = int(image.shape[0] / 2)
        padW = int(image.shape[1] / 2)
        # image.rspmap_data = np.pad(image.rspmap_data, ((0, 0), (0, 0), (padH, padH), (padW, padW)), 'constant')
        self.rps_zeros[0, :, padH:padH + image.shape[0],
                       padW:padW + image.shape[1]] = image.rspmap_data
        image.rspmap_data = self.rps_zeros
        # cost = time() - timeFitStart
        timeFitStart = time()

        try:
            weighted_project = self.opt['ablation'][0]
            weighted_meanshift = self.opt['ablation'][1]
        except (KeyError, TypeError, IndexError):
            weighted_project = True
            weighted_meanshift = True

        if weighted_project:
            # Correction step
            search_grid = build_grid(
                np.array(search_ratio[0] * np.array(image.shape), int))

            # Compute patch responses
            patch_responses = self.expert_ensemble.predict_probability(
                image, initial_shape, search_grid.shape)

            project_weight = rspimage.calculate_evidence(
                patch_responses,
                rate=self.confidence_gama,
                offset=self.kernel_idealmap).reshape((1, -1))

            # write project_weight
            inirho = self.opt['pdm_rho']
            # inirho = 0
            ini_rho2_inv_prior = np.hstack(
                (np.zeros((4,)), inirho / self.transform.model.eigenvalues))

            initial_shape_mean = (initial_shape.points.ravel() -
                                  self.transform.model._mean)
            iniJe = -self.J.T.dot(initial_shape_mean * project_weight[0])
            iniJWJ = self.J.T.dot(np.diag(project_weight[0]).dot(self.J))
            inv_JJ = np.linalg.inv(iniJWJ + np.diag(ini_rho2_inv_prior))
            initial_p = -inv_JJ.dot(iniJe)  # self.inv_JJ_prior

            # Update pdm
            self.transform._from_vector_inplace(initial_p)
        else:
            self.transform.set_target(initial_shape)

        # Initialize transform
        p_list = [self.transform.as_vector()]

        #  check boundary
        if np.any(self.transform.target.points < 0) or np.any(
                self.transform.target.points >= image.shape[0]):
            self.transform.target.points[self.transform.target.points < 0] = 1
            self.transform.target.points[self.transform.target.points >=
                                         image.shape[0]] = image.shape[0] - 2
            self.transform.set_target(self.transform.target)
            self.transform.target.points[self.transform.target.points < 0] = 1
            self.transform.target.points[self.transform.target.points >=
                                         image.shape[0]] = image.shape[0] - 2
            # print image.path.name
            # print 'out of bound'

        shapes = [self.transform.target]

        # tuning step
        # Initialize iteration counter and epsilon
        k = 0
        eps = np.inf

        errSum = [np.inf]
        epsList = [np.inf]
        searchR = []
        try:
            max_iters = self.opt['numIter']
        except (KeyError, TypeError):
            max_iters = 10

        rho2 = self.opt['rho2']
        # timeFitStart = time()
        while k < max_iters and eps > self.eps:

            #target = self.transform.target
            target = shapes[k]

            search_grid = build_grid(
                np.array(search_ratio[k] * np.array(image.shape), int))

            # search_grid = self.search_grid *np.array(image.shape, dtype=np.float32)/np.array(image.rspmap_data.shape)[-2:-1]

            candidate_landmarks = (target.points[:, None, None, None, :] +
                                   search_grid)

            # Compute patch responses
            patch_responses = self.expert_ensemble.predict_probability(
                image, target, search_grid.shape)

            if weighted_meanshift:
                kernel_covariance = rspimage.calculate_evidence(
                    patch_responses,
                    rate=self.confidence_gama,
                    offset=self.kernel_idealmap)
            else:
                kernel_covariance = np.ones(2 * patch_responses.shape[0])

            try:
                smooth_rspmap = self.opt['smooth']
            except (KeyError, TypeError):
                smooth_rspmap = True

            if smooth_rspmap:
                try:
                    patch_responses /= np.sum(patch_responses,
                                              axis=(-2, -1))[..., None, None]
                except Exception:
                    print(image.path.name)
                    print('Normalize fail.')

                mvnList = [
                    multivariate_normal(mean=np.zeros(2),
                                        cov=2.0 * rho2 /
                                        (kernel_covariance[2 * i] +
                                         kernel_covariance[2 * i + 1]))
                    for i in range(patch_responses.shape[0])
                ]
                kernel_grid = np.array([
                    mvnList[i].pdf(search_grid)[None]
                    for i in range(patch_responses.shape[0])
                ])

                patch_kernels = patch_responses * kernel_grid
            else:
                patch_kernels = patch_responses

            # Normalise smoothed responses
            patch_kernels /= np.sum(patch_kernels, axis=(-2, -1))[..., None,
                                                                  None]

            # Compute mean shift target
            mean_shift_target = np.sum(patch_kernels[..., None] *
                                       candidate_landmarks,
                                       axis=(-3, -2))

            # Compute shape error term
            error = mean_shift_target.ravel() - target.as_vector()
            errSum.append(np.sum(np.abs(error)))
            # error = error

            # Solve for increments on the shape parameters
            if map_inference:
                '''
                Je = (self.rho2_inv_L * self.transform.as_vector() -
                      self.J.T.dot(error))
                # inv_JJ_prior = np.linalg.inv(self.JJ + np.diag(self.rho2_inv_L))
                dp = -self.inv_JJ_prior.dot(Je)     #self.inv_JJ_prior
                '''
                Je = (self.rho2_inv_L * self.transform.as_vector() -
                      self.J.T.dot(error * kernel_covariance))
                JWJ = self.J.T.dot(np.diag(kernel_covariance).dot(self.J))
                inv_JJ_prior = np.linalg.inv(JWJ + np.diag(self.rho2_inv_L))
                dp = -inv_JJ_prior.dot(Je)  # self.inv_JJ_prior
                '''
                sim_prior = np.zeros((4,))
                pdm_prior = rho2 / self.transform.model.eigenvalues
                rho2_inv_L = np.hstack((sim_prior, pdm_prior))
                Je = (rho2_inv_L * self.transform.as_vector() -
                      self.J.T.dot(error*kernel_covariance))
                JWJ = self.J.T.dot(np.diag(kernel_covariance).dot(self.J))
                inv_JJ_prior = np.linalg.inv(JWJ + np.diag(rho2_inv_L))
                dp = -inv_JJ_prior.dot(Je)  # self.inv_JJ_prior
                '''
            else:
                J = np.rollaxis(self.transform.d_dp(target.points), -1, 1)
                J = J.reshape((-1, J.shape[-1]))
                JJ = J.T.dot(np.diag(kernel_covariance**2).dot(J))
                # Compute Jacobian pseudo-inverse
                pinv_J = np.linalg.solve(JJ, J.T.dot(
                    np.diag(kernel_covariance)))  #
                dp = pinv_J.dot(error)
                print('notMAP')

            # Update pdm
            s_k = self.transform.target.points
            self.transform._from_vector_inplace(self.transform.as_vector() +
                                                dp)
            if np.any(self.transform.target.points < 0) or np.any(
                    self.transform.target.points >= image.shape[0]):
                self.transform.target.points[
                    self.transform.target.points < 0] = 1
                self.transform.target.points[
                    self.transform.target.points >=
                    image.shape[0]] = image.shape[0] - 2
                self.transform.set_target(self.transform.target)
                self.transform.target.points[
                    self.transform.target.points < 0] = 1
                self.transform.target.points[
                    self.transform.target.points >=
                    image.shape[0]] = image.shape[0] - 2

            p_list.append(self.transform.as_vector())
            shapes.append(self.transform.target)

            # Test convergence
            eps = np.abs(np.linalg.norm(s_k - self.transform.target.points))
            epsList.append(eps)
            # Increase iteration counter
            k += 1

        cost = time() - timeFitStart
        cost = [cost]

        visualize = self.opt['verbose']

        if visualize:
            # wrFilebase = self.opt['imgDir']
            # wrFilebase = '../'

            image.landmarks['initial'] = shapes[0]
            image.landmarks['final'] = shapes[-1]

            plt.clf()
            image._view_landmarks_2d(group='final',
                                     marker_size=5,
                                     marker_edge_colour='k',
                                     marker_face_colour='g')

            plt.show(block=False)
            plt.pause(0.05)

            # plt.savefig(wrFilebase + '/output/' + image.path.stem + '.png')

        return ParametricIterativeResult(
            shapes=shapes,
            shape_parameters=p_list,
            initial_shape=initial_shape,  # image.landmarks['__initial_shape']
            image=image,
            gt_shape=gt_shape,
            costs=cost)
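A hedged usage sketch for this run method. The surrounding fitter object, the precomputed response maps stored on image.rspmap_data and the keys expected in self.opt are all assumptions read off the code above, not a documented API:

# `algorithm` is assumed to be an instance of the fitter class owning run(),
# already constructed with its expert ensemble, shape model and opt dict.
# `image` is a menpo Image carrying precomputed response maps in rspmap_data,
# and `initial_shape` is a menpo PointCloud initialisation.
result = algorithm.run(image,
                       initial_shape,
                       gt_shape=None,
                       max_iters=20,
                       map_inference=True)
print(result.final_shape)  # fitted PointCloud after the mean-shift updates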
Code example #9
File: ensemble.py Project: lydonchandra/menpofit
def generate_gaussian_response(patch_shape, response_covariance):
    r"""
    """
    grid = build_grid(patch_shape)
    mvn = multivariate_normal(mean=np.zeros(2), cov=response_covariance)
    return mvn.pdf(grid)