Example 1
0
    def _update_cholesky(self, emp_covs, maxiter=10, eps=1.0e-4, verbose=0):
        """
        Core of spice: cholesky update (grouped, L21-penalized version).

        Iteratively refines the Cholesky factors of all group precision
        matrices under a shared (L21) penalty, stopping when the smallest
        per-group change falls below an eps-scaled threshold, then rebuilds
        the precision matrices from their factors.

        Parameters
        ----------
        emp_covs: array of shape (n, p, p),
            the sample covariances for each group of observations
        maxiter: int, optional
            max. number iterations
        eps: float, optional
            threshold value on precision change to declare convergence
        verbose: integer, optional
            verbosity mode
        """
        chol_precs_old = self.chol_precs.copy()
        # Convergence threshold, scaled by the smallest squared Frobenius
        # norm among the current Cholesky factors
        delta = eps * np.sum(np.sum(chol_precs_old ** 2, axis=-1), axis=-1).min()

        # Implement an L21 norm: penalty weight couples the groups through
        # the root-sum-of-squares of the precisions across groups
        initial_abs_precs = np.sqrt(np.sum(self.precisions ** 2, axis=0))
        # Regularized 1/initial_abs_prec:
        abs_inv_prec0 = emp_covs.shape[0] / (initial_abs_precs + eps ** 2)
        # Put the diagonal to 0 so diagonal entries are not penalized
        p = abs_inv_prec0.shape[0]
        abs_inv_prec0.flat[:: p + 1] = 0

        for i in range(maxiter):
            # L21: Call the L1 problem for all chol with the same
            # penalty (initial_abs_prec)
            for emp_cov, precision, chol_prec in zip(emp_covs, self.precisions, self.chol_precs):
                _choleski_update_l1(emp_cov, precision, abs_inv_prec0, self.l21, chol_prec=chol_prec)
            diff = np.sum(np.sum((chol_precs_old - self.chol_precs) ** 2, axis=-1), axis=-1).min()
            if diff < delta:
                if verbose:
                    # BUGFIX: use the print() function — the original
                    # Python 2 print statement is a SyntaxError in Python 3
                    print(i, diff, delta)
                break
            chol_precs_old = self.chol_precs.copy()

        # Don't forget to update the precision: rebuild each matrix as
        # chol.T @ chol, in place so external references stay valid
        for precision, chol_prec in zip(self.precisions, self.chol_precs):
            precision[...] = np.dot(chol_prec.T, chol_prec)
Example 2
0
    def _update_cholesky(self, emp_cov, maxiter=10, eps=1.0e-4, regularization=1e-8, verbose=0):
        """
        Core of spice: cholesky update (single-covariance, L1 version).

        Iteratively refines the Cholesky factor of the precision matrix
        under an L1 penalty weighted by the current precision magnitudes,
        stopping when the factor change drops below an eps-scaled
        threshold, then rebuilds ``self.precision`` from the factor.

        Parameters
        ----------
        emp_cov: array of shape (n, n),
            the sample covariance of the observations
        maxiter: int, optional
            max. number iterations
        eps: float, optional
            threshold value on precision change to declare convergence
        regularization: float, optional
            regularization of the inverse
        verbose: integer, optional
            verbosity mode
        """
        chol_prec_old = self.chol_prec.copy()
        # Convergence threshold, scaled by the squared Frobenius norm of
        # the current Cholesky factor
        delta = eps * np.sum(chol_prec_old ** 2)

        initial_abs_prec = np.absolute(self.precision)
        # Regularized 1/initial_abs_prec:
        abs_inv_prec0 = 1 / (initial_abs_prec + regularization)
        # Put the diagonal to zero so diagonal entries are not penalized
        p = abs_inv_prec0.shape[0]
        abs_inv_prec0.flat[:: p + 1] = 0

        for i in range(maxiter):
            _choleski_update_l1(emp_cov, self.precision, abs_inv_prec0, self.l1, chol_prec=self.chol_prec)
            diff = np.sum((chol_prec_old - self.chol_prec) ** 2)
            if diff < delta:
                if verbose:
                    # BUGFIX: use the print() function — the original
                    # Python 2 print statement is a SyntaxError in Python 3
                    print(i, diff, delta)
                break
            chol_prec_old = self.chol_prec.copy()

        # Don't forget to update the precision from its Cholesky factor
        self.precision = np.dot(self.chol_prec.T, self.chol_prec)