Code Example #1
File: glm_poisson.py  Project: vishalbelsare/regain
    def fit(self, X, y=None):
        """
        X : ndarray, shape = (n_samples * n_times, n_dimensions)
            Data matrix.
        y : added for compatiblity
        gamma: float,
            Step size of the proximal gradient descent.
        """
        X = check_array(X)
        if self.mode.lower() == "symmetric_fbs":
            raise ValueError("Not implemented.")

        elif self.mode.lower() == "coordinate_descent":
            print("sono qui")
            thetas_pred = []
            historys = []
            if self.intercept:
                X = np.hstack((X, np.ones((X.shape[0], 1))))
            for ix in range(X.shape[1]):
                verbose = max(0, self.verbose - 1)
                res = fit_each_variable(X,
                                        ix,
                                        self.alpha,
                                        tol=self.tol,
                                        verbose=verbose)
                thetas_pred.append(res[0])
                historys.append(res[1:])
            self.precision_ = build_adjacency_matrix(thetas_pred,
                                                     how=self.reconstruction)
            self.history = historys
        else:
            raise ValueError("Unknown optimization mode. Found " + self.mode +
                             ". Options are 'coordiante_descent', "
                             "'symmetric_fbs'")
        return self
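
The snippet above delegates the per-column work to fit_each_variable, which is not shown. For context, a node-wise Poisson lasso of this kind is typically solved with proximal gradient descent (ISTA): a gradient step on the Poisson negative log-likelihood followed by soft-thresholding for the l1 penalty. The sketch below is illustrative only and is not the regain implementation; the name poisson_node_lasso and its defaults are made up for this example.

import numpy as np

def soft_threshold(a, lamda):
    # elementwise soft-thresholding: the proximal operator of the l1 norm
    return np.sign(a) * np.maximum(np.abs(a) - lamda, 0.0)

def poisson_node_lasso(X, ix, alpha, gamma=1e-3, max_iter=500, tol=1e-4):
    # Regress column ix on all other columns under a Poisson model:
    #   minimize sum(exp(A @ theta) - y * (A @ theta)) + alpha * ||theta||_1
    # (assumes small-count, roughly standardized data; no line search)
    y = X[:, ix]
    sel = [i for i in range(X.shape[1]) if i != ix]
    A = X[:, sel]
    theta = np.zeros(A.shape[1])
    for _ in range(max_iter):
        grad = A.T @ (np.exp(A @ theta) - y)  # gradient of the Poisson NLL
        theta_new = soft_threshold(theta - gamma * grad, gamma * alpha)
        if np.linalg.norm(theta_new - theta) < tol:
            return theta_new
        theta = theta_new
    return theta
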
Code Example #2
 def fit(self, X, y=None):
     """
     X : ndarray, shape = (n_samples * n_times, n_dimensions)
         Data matrix.
     y : added for compatiblity
     gamma: float,
         Step size of the proximal gradient descent.
     """
     X = check_array(X)
     if self.mode.lower() == 'symmetric_fbs':
         res = _fit(X,
                    self.alpha,
                    tol=self.tol,
                    gamma=self.gamma,
                    max_iter=self.max_iter,
                    verbose=self.verbose)
         self.precision_ = res[0]
         self.history = res[1:]
     elif self.mode.lower() == 'coordinate_descent':
         raise NotImplementedError("Mode 'coordinate_descent' is not implemented.")
         # thetas_pred = []
         # historys = []
         # for ix in range(X.shape[1]):
         #     res = fit_each_variable(X, ix, self.alpha, tol=self.tol,
         #                             gamma=self.gamma,
         #                             verbose=self.verbose)
         #     thetas_pred.append(res[0])
         #     historys.append(res[1:])
         # self.precision_ = build_adjacency_matrix(thetas_pred,
         #                                          how=self.reconstruction)
         # self.history = historys
     elif self.mode.lower() == 'logistic_regression':
         thetas_pred = []
         for ix in range(X.shape[1]):
             verbose = max(0, self.verbose - 1)
             selector = np.array([i for i in range(X.shape[1]) if i != ix])
             res = LogisticRegression(C=1 / self.alpha,
                                      penalty='l1',
                                      solver='liblinear',
                                      verbose=verbose,
                                      random_state=0).fit(
                                          X[:, selector], X[:, ix]).coef_
             thetas_pred.append(res)
         self.precision_ = build_adjacency_matrix(thetas_pred,
                                                  how=self.reconstruction)
     else:
         raise ValueError('Unknown optimization mode. Found ' + self.mode +
                          ". Options are 'symmetric_fbs' and "
                          "'logistic_regression'")
     return self
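
The 'logistic_regression' branch is node-wise neighborhood selection: each variable is regressed on all the others with an l1-penalised logistic regression, and the per-node coefficient vectors are then symmetrised into one adjacency structure. A self-contained sketch follows; the union-style symmetrisation at the end is my own minimal stand-in for build_adjacency_matrix, not the regain helper.

import numpy as np
from sklearn.linear_model import LogisticRegression

def neighborhood_select(X, alpha):
    # X is an (n_samples, d) binary matrix; alpha maps to sklearn's C = 1/alpha
    n, d = X.shape
    coefs = np.zeros((d, d))
    for ix in range(d):
        sel = [i for i in range(d) if i != ix]
        lr = LogisticRegression(C=1.0 / alpha, penalty='l1',
                                solver='liblinear', random_state=0)
        lr.fit(X[:, sel], X[:, ix])
        coefs[ix, sel] = lr.coef_.ravel()
    # union rule: keep an edge if either regression found it; average the weights
    mask = (coefs != 0) | (coefs.T != 0)
    return 0.5 * (coefs + coefs.T) * mask

# e.g. adj = neighborhood_select((np.random.rand(500, 8) > 0.5).astype(int), alpha=0.1)
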
Code Example #3
File: glm_gaussian.py  Project: vishalbelsare/regain
 def fit(self, X, y=None, gamma=1e-3):
     """
     X : ndarray, shape = (n_samples * n_times, n_dimensions)
         Data matrix.
     y : added for compatiblity
     gamma: float,
         Step size of the proximal gradient descent.
     """
     X = check_array(X)
     thetas_pred = []
     historys = []
     for ix in range(X.shape[1]):
         res = fit_each_variable(X, ix, self.alpha, gamma=gamma, tol=self.tol)
         thetas_pred.append(res[0])
         historys.append(res[1:])
     self.precision_ = build_adjacency_matrix(thetas_pred, how=self.reconstruction)
     self.history = historys
     return self
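
All three estimators finish by calling build_adjacency_matrix to merge the d node-wise coefficient vectors into a single symmetric matrix. The helper itself is not part of any snippet here; the sketch below is a guess at its contract based on how it is called, with 'union' keeping an edge found by either of the two regressions and 'intersection' requiring both.

import numpy as np

def build_adjacency_sketch(thetas, how="union"):
    # thetas[ix] holds the coefficients of variable ix regressed on the others,
    # so each vector has length d - 1 and is scattered off the diagonal.
    d = len(thetas)
    T = np.zeros((d, d))
    for ix, theta in enumerate(thetas):
        sel = [i for i in range(d) if i != ix]
        T[ix, sel] = np.asarray(theta).ravel()
    if how == "union":
        mask = (T != 0) | (T.T != 0)
    else:  # "intersection"
        mask = (T != 0) & (T.T != 0)
    return 0.5 * (T + T.T) * mask
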
Code Example #4
def _fit_time_poisson_model(
    X,
    alpha=0.01,
    rho=1,
    kernel=None,
    max_iter=100,
    verbose=False,
    psi="laplacian",
    gamma=0.1,
    tol=1e-4,
    rtol=1e-4,
    return_history=False,
    return_n_iter=True,
    compute_objective=True,
    stop_at=None,
    stop_when=1e-4,
    n_cores=-1,
    update_rho_options=None,
):
    """Time-varying graphical model solver.

    Solves the following problem via ADMM:
        min sum_{i=1}^T -n_i log_likelihood(K_i, X_i) + alpha ||K_i||_{od,1}
            + sum_{s>t} k(s,t) Psi(K_s - K_t)

    where X_i is the n_i x D matrix of observations at time i and the
    log-likelihood changes according to the distribution.

    Parameters
    ----------
    X : ndarray, shape (n_times, n_samples, n_features)
        Data matrix.
    alpha : float, optional
        Regularisation parameter.
    rho : float, optional
        Augmented Lagrangian parameter.
    max_iter : int, optional
        Maximum number of iterations.
    tol : float, optional
        Absolute tolerance for convergence.
    rtol : float, optional
        Relative tolerance for convergence.
    return_history : bool, optional
        Return the history of computed values.
    kernel : ndarray, shape (n_times, n_times), optional
        Temporal similarity kernel k(s, t). Defaults to the identity matrix.
    psi : string, optional
        Temporal consistency penalty Psi, validated by check_norm_prox.

    Returns
    -------
    K : numpy.ndarray, shape (n_times, n_features, n_features)
        Solution to the problem, one precision-like matrix per time point.
    history : list
        If return_history, then also a structure that contains the
        objective value, the primal and dual residual norms, and tolerances
        for the primal and dual residual norms at each iteration.

    """
    psi, prox_psi, psi_node_penalty = check_norm_prox(psi)
    n_times, n_samples, n_features = X.shape
    n_samples = np.array([n_samples] * n_times)

    if kernel is None:
        kernel = np.eye(n_times)

    K = np.zeros((n_times, n_features, n_features))

    Z_M = {}
    U_M = {}
    Z_M_old = {}

    for m in range(1, n_times):
        # all possible non-Markovian jumps
        Z_L = K.copy()[:-m]
        Z_R = K.copy()[m:]
        Z_M[m] = (Z_L, Z_R)

        U_L = np.zeros_like(Z_L)
        U_R = np.zeros_like(Z_R)
        U_M[m] = (U_L, U_R)

        Z_L_old = np.zeros_like(Z_L)
        Z_R_old = np.zeros_like(Z_R)
        Z_M_old[m] = (Z_L_old, Z_R_old)

    checks = [convergence(obj=objective(X, K, Z_M, alpha, kernel, psi))]
    for iteration_ in range(max_iter):
        # update K
        A = np.zeros_like(K)
        for m in range(1, n_times):
            A[:-m] += Z_M[m][0] - U_M[m][0]
            A[m:] += Z_M[m][1] - U_M[m][1]

        A /= n_times
        A += A.transpose(0, 2, 1)
        A /= 2.0
        # K_new = np.zeros_like(K)

        for t in range(n_times):
            thetas_pred = []
            for v in range(n_features):
                inner_verbose = max(0, verbose - 1)
                res = fit_each_variable(X[t, :, :],
                                        v,
                                        alpha,
                                        tol=tol,
                                        verbose=inner_verbose,
                                        A=A[t, :, :],
                                        T=n_times,
                                        rho=rho)
                thetas_pred.append(res[0])

            K[t, :, :] = build_adjacency_matrix(thetas_pred, "union")

        # other Zs
        for m in range(1, n_times):
            U_L, U_R = U_M[m]
            A_L = K[:-m] + U_L
            A_R = K[m:] + U_R
            if not psi_node_penalty:
                prox_e = prox_psi(A_R - A_L,
                                  lamda=2.0 *
                                  np.diag(kernel, m)[:, None, None] / rho)
                Z_L = 0.5 * (A_L + A_R - prox_e)
                Z_R = 0.5 * (A_L + A_R + prox_e)
            else:
                Z_L, Z_R = prox_psi(
                    np.concatenate((A_L, A_R), axis=1),
                    lamda=0.5 * np.diag(kernel, m)[:, None, None] / rho,
                    rho=rho,
                    tol=tol,
                    rtol=rtol,
                    max_iter=max_iter,
                )
            Z_M[m] = (Z_L, Z_R)

            # update other residuals
            U_L += K[:-m] - Z_L
            U_R += K[m:] - Z_R

        # diagnostics, reporting, termination checks
        rnorm = np.sqrt(
            sum(
                squared_norm(K[:-m] - Z_M[m][0]) +
                squared_norm(K[m:] - Z_M[m][1]) for m in range(1, n_times)))

        snorm = rho * np.sqrt(
            sum(
                squared_norm(Z_M[m][0] - Z_M_old[m][0]) +
                squared_norm(Z_M[m][1] - Z_M_old[m][1])
                for m in range(1, n_times)))

        obj = objective(X, K, Z_M, alpha, kernel,
                        psi) if compute_objective else np.nan

        check = convergence(
            obj=obj,
            rnorm=rnorm,
            snorm=snorm,
            e_pri=n_features * n_times * tol + rtol * max(
                np.sqrt(
                    sum(
                        squared_norm(Z_M[m][0]) + squared_norm(Z_M[m][1])
                        for m in range(1, n_times))),
                np.sqrt(
                    squared_norm(K) + sum(
                        squared_norm(K[:-m]) + squared_norm(K[m:])
                        for m in range(1, n_times))),
            ),
            e_dual=n_features * n_times * tol + rtol * rho * np.sqrt(
                sum(
                    squared_norm(U_M[m][0]) + squared_norm(U_M[m][1])
                    for m in range(1, n_times))),
        )
        for m in range(1, n_times):
            Z_M_old[m] = (Z_M[m][0].copy(), Z_M[m][1].copy())

        if verbose:
            print("obj: %.4f, rnorm: %.4f, snorm: %.4f,"
                  "eps_pri: %.4f, eps_dual: %.4f" % check[:5])

        checks.append(check)
        if stop_at is not None:
            if abs(check.obj - stop_at) / abs(stop_at) < stop_when:
                break

        if check.rnorm <= check.e_pri and check.snorm <= check.e_dual:
            break

        rho_new = update_rho(rho,
                             rnorm,
                             snorm,
                             iteration=iteration_,
                             **(update_rho_options or {}))
        # scaled dual variables should be also rescaled
        # U_0 *= rho / rho_new
        for m in range(1, n_times):
            U_L, U_R = U_M[m]
            U_L *= rho / rho_new
            U_R *= rho / rho_new
        rho = rho_new
    else:
        warnings.warn("Objective did not converge.")

    return_list = [K]
    if return_history:
        return_list.append(checks)
    if return_n_iter:
        return_list.append(iteration_ + 1)
    return return_list
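
Driving the solver end to end would look roughly like the sketch below. Only the signature and the (n_times, n_samples, n_features) shape convention come from the code above; the import path is not shown in the snippet, and the RBF kernel is just one sensible choice for the temporal coupling k(s, t).

import numpy as np
# from regain... import _fit_time_poisson_model  # actual module path not shown above

rng = np.random.default_rng(0)
n_times, n_samples, n_features = 5, 100, 10
X = rng.poisson(lam=1.0, size=(n_times, n_samples, n_features))

# RBF similarity between time points; with kernel=None the solver falls
# back to np.eye(n_times), whose off-diagonals are zero, i.e. no coupling
t = np.arange(n_times)
kernel = np.exp(-0.5 * (t[:, None] - t[None, :]) ** 2)

# K, history, n_iter = _fit_time_poisson_model(
#     X, alpha=0.1, kernel=kernel, psi="laplacian",
#     return_history=True, return_n_iter=True)
# K[i] is then the estimated network at time i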