# NOTE: the ``regain.*`` import paths below mirror the surrounding package
# layout; the objective/loss helpers referenced by the solvers (``objective``,
# ``penalty_objective``, ``loss_gen``, ``neg_logl``, ``dtrace``,
# ``grad_laplacian``, ``prox_cvx``, ``prox_grad``, ``init_precision``,
# ``lasso_objective``, ``objective_admm``, ``objective_admm2``, ``_J``,
# ``choose_alpha``, ``l1_norm``, ``fit_each_variable``,
# ``build_adjacency_matrix``, ``rbf_weights``, ``exp_weights``,
# ``lin_weights``) are defined elsewhere in the package.
import warnings
from functools import partial
from itertools import compress

import numpy as np
from scipy import linalg
from scipy.optimize import minimize, minimize_scalar
from sklearn.covariance import empirical_covariance
from sklearn.utils.extmath import squared_norm
# on older scikit-learn releases this lives in ``sklearn.linear_model.ridge``
from sklearn.linear_model._ridge import _solve_cholesky_kernel

from regain.prox import (blockwise_soft_thresholding_symmetric,
                         prox_laplacian, prox_logdet, prox_trace_indicator,
                         soft_thresholding, soft_thresholding_od,
                         soft_thresholding_sign)
from regain.update_rules import update_rho
from regain.utils import convergence
from regain.validation import check_norm_prox


def infimal_convolution(
    S,
    alpha=1.0,
    tau=1.0,
    rho=1.0,
    max_iter=100,
    verbose=False,
    tol=1e-4,
    rtol=1e-2,
    return_history=False,
    return_n_iter=True,
    update_rho_options=None,
    compute_objective=True,
):
    r"""Latent variable graphical lasso solver.

    Solves the following problem via ADMM:

        min - log_likelihood(S, K-L) + alpha ||K||_{od,1} + tau ||L||_*

    where S is the empirical covariance of the data matrix D
    (training observations by features).

    Parameters
    ----------
    S : array-like
        Empirical covariance matrix.
    alpha, tau : float, optional
        Regularisation parameters.
    rho : float, optional
        Augmented Lagrangian parameter.
    max_iter : int, optional
        Maximum number of iterations.
    tol : float, optional
        Absolute tolerance for convergence.
    rtol : float, optional
        Relative tolerance for convergence.
    return_history : bool, optional
        Return the history of computed values.
    return_n_iter : bool, optional
        Return the number of iterations before convergence.
    verbose : bool, default False
        Print info at each iteration.

    Returns
    -------
    K, L : np.array, 2-dimensional, size (d x d)
        Solution to the problem.
    S : np.array, 2 dimensional
        Empirical covariance matrix.
    n_iter : int
        If return_n_iter, returns the number of iterations before convergence.
    history : list
        If return_history, then also a structure that contains the
        objective value, the primal and dual residual norms, and tolerances
        for the primal and dual residual norms at each iteration.

    """
    K = np.zeros_like(S)
    L = np.zeros_like(S)
    U = np.zeros_like(S)
    R_old = np.zeros_like(S)

    checks = []
    for iteration_ in range(max_iter):
        # update R
        A = K - L - U
        A += A.T
        A /= 2.0
        R = prox_laplacian(S + rho * A, lamda=rho / 2.0)

        A = L + R + U
        K = soft_thresholding(A, lamda=alpha / rho)

        A = K - R - U
        A += A.T
        A /= 2.0
        L = prox_trace_indicator(A, lamda=tau / rho)

        # update residuals
        U += R - K + L

        # diagnostics, reporting, termination checks
        obj = objective(S, R, K, L, alpha, tau) if compute_objective else np.nan
        rnorm = np.linalg.norm(R - K + L)
        snorm = rho * np.linalg.norm(R - R_old)
        check = convergence(
            obj=obj,
            rnorm=rnorm,
            snorm=snorm,
            e_pri=np.sqrt(R.size) * tol
            + rtol * max(np.linalg.norm(R), np.linalg.norm(K - L)),
            e_dual=np.sqrt(R.size) * tol + rtol * rho * np.linalg.norm(U),
        )
        R_old = R.copy()

        if verbose:
            print("obj: %.4f, rnorm: %.4f, snorm: %.4f, "
                  "eps_pri: %.4f, eps_dual: %.4f" % check[:5])

        checks.append(check)
        if check.rnorm <= check.e_pri and check.snorm <= check.e_dual:
            break
        if check.obj == np.inf:
            break
        rho_new = update_rho(rho, rnorm, snorm, iteration=iteration_,
                             **(update_rho_options or {}))
        # scaled dual variables should also be rescaled
        U *= rho / rho_new
        rho = rho_new
    else:
        warnings.warn("Objective did not converge.")

    covariance_ = linalg.pinvh(K)
    return_list = [K, L, covariance_]
    if return_history:
        return_list.append(checks)
    if return_n_iter:
        return_list.append(iteration_)
    return return_list
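
# Example (sketch): run ``infimal_convolution`` on a synthetic empirical
# covariance. Data sizes and regularisation values below are illustrative
# assumptions only; ``compute_objective=False`` avoids the module-level
# ``objective`` helper.
def _example_infimal_convolution():
    rng = np.random.RandomState(0)
    X = rng.randn(100, 20)                  # 100 observations, 20 features
    S = np.cov(X, rowvar=False)             # empirical covariance
    K, L, covariance_, n_iter = infimal_convolution(
        S, alpha=0.1, tau=0.1, max_iter=200, compute_objective=False)
    return K, L, n_iter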
def prox_node_penalty(A_12, lamda, rho=1, tol=1e-4, rtol=1e-2, max_iter=500):
    """Proximal operator for the node penalty.

    Here lamda = beta / (2. * rho) and
    A_12 = np.concatenate((A_1, A_2), axis=1).
    """
    n_time, _, n_dim = A_12.shape

    U_1 = np.full((A_12.shape[0], n_dim, n_dim), 1.0 / n_dim, dtype=float)
    U_2 = np.copy(U_1)
    Y_1 = np.copy(U_1)
    Y_2 = np.copy(U_1)

    C = np.hstack((np.eye(n_dim), -np.eye(n_dim), np.eye(n_dim)))
    inverse = np.linalg.inv(C.T.dot(C) + 2 * np.eye(3 * n_dim))

    V = np.zeros_like(U_1)
    W = np.zeros_like(U_1)
    V_old = np.zeros_like(U_1)
    W_old = np.zeros_like(U_1)

    for iteration_ in range(max_iter):
        A = (Y_1 - Y_2 - W - U_1
             + (W.transpose(0, 2, 1) - U_2).transpose(0, 2, 1)) / 2.0
        V = blockwise_soft_thresholding_symmetric(A, lamda=lamda)

        A = np.concatenate(((V + U_2).transpose(0, 2, 1), A_12), axis=1)
        D = V + U_1
        # Z = np.linalg.solve(C.T*C + eta*np.identity(3*n), - C.T*D + eta* A)
        Z = np.empty_like(A)
        for i, (A_i, D_i) in enumerate(zip(A, D)):
            Z[i] = inverse.dot(2 * A_i - C.T.dot(D_i))
        W, Y_1, Y_2 = (Z[:, i * n_dim:(i + 1) * n_dim, :] for i in range(3))

        # update residuals
        delta_U_1 = V + W - (Y_1 - Y_2)
        delta_U_2 = V - W.transpose(0, 2, 1)
        U_1 += delta_U_1
        U_2 += delta_U_2

        # diagnostics
        rnorm = np.sqrt(squared_norm(delta_U_1) + squared_norm(delta_U_2))
        snorm = rho * np.sqrt(
            squared_norm(W - W_old) + squared_norm(V + W - V_old - W_old))
        check = convergence(
            obj=np.nan,
            rnorm=rnorm,
            snorm=snorm,
            e_pri=np.sqrt(2 * V.size) * tol + rtol * max(
                np.sqrt(squared_norm(W) + squared_norm(V + W)),
                np.sqrt(squared_norm(V) + squared_norm(Y_1 - Y_2))),
            e_dual=np.sqrt(2 * V.size) * tol + rtol * rho * np.sqrt(
                squared_norm(U_1) + squared_norm(U_2)),
        )
        W_old = W.copy()
        V_old = V.copy()

        # if np.linalg.norm(delta_U_1, 'fro') < tol and \
        #         np.linalg.norm(delta_U_2, 'fro') < tol:
        if check.rnorm <= check.e_pri and check.snorm <= check.e_dual:
            break
        rho_new = update_rho(rho, rnorm, snorm, iteration=iteration_)
        # scaled dual variables should also be rescaled
        U_1 *= rho / rho_new
        U_2 *= rho / rho_new
        rho = rho_new
    else:
        warnings.warn("Node norm did not converge.")

    return Y_1, Y_2
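
# Example (sketch): apply ``prox_node_penalty`` to a stacked pair of blocks,
# mirroring how the time-varying solvers below call it (concatenation along
# axis 1). Shapes and lamda are illustrative assumptions.
def _example_prox_node_penalty():
    rng = np.random.RandomState(0)
    T, d = 4, 5
    A_1 = rng.randn(T - 1, d, d)
    A_1 += A_1.transpose(0, 2, 1)           # symmetrise each block
    A_2 = rng.randn(T - 1, d, d)
    A_2 += A_2.transpose(0, 2, 1)
    Y_1, Y_2 = prox_node_penalty(
        np.concatenate((A_1, A_2), axis=1), lamda=0.1)
    return Y_1, Y_2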
def gradient_equal_time_graphical_lasso(
        S, K_init, max_iter, loss, C, theta, rho, mult,
        weights, m, eps, psi, gamma, tol, rtol, verbose,
        return_history, return_n_iter, mode, compute_objective,
        stop_at, stop_when, update_rho_options):
    """Equality constrained time-varying graphical LASSO solver.

    Solves the following problem via ADMM:
        min sum_{i=1}^T ||K_i||_{od,1} + beta sum_{i=2}^T Psi(K_i - K_{i-1})
        s.t. objective = c_i for i = 1, ..., T

    where S_i = (1/n_i) X_i^T X_i is the empirical covariance of data
    matrix X (training observations by features).

    Parameters
    ----------
    S : ndarray, shape (n_times, n_features, n_features)
        Empirical covariance of data, one matrix per time point.
    K_init : ndarray, shape (n_times, n_features, n_features)
        Initial guess for the precision matrices.
    max_iter : int
        Maximum number of iterations.
    loss : str
        If 'LL', use the negative log-likelihood loss; otherwise the
        D-trace loss.
    C : ndarray, shape (n_times,)
        Equality constraint targets c_i, one per time point.
    theta : float
        Trade-off between the l1 penalty (theta) and the temporal
        penalty (1 - theta).
    rho : float
        Augmented Lagrangian parameter.
    gamma : float, optional
        Kernel parameter when psi is chosen to be 'kernel'.
    tol : float
        Absolute tolerance for convergence.
    rtol : float
        Relative tolerance for convergence.
    return_history : bool
        Return the history of computed values.
    return_n_iter : bool
        Return the number of iterations before convergence.
    verbose : bool
        Print info at each iteration.
    update_rho_options : dict
        Arguments for the rho update. See regain.update_rules.update_rho
        function for more information.
    compute_objective : bool
        Choose to compute the objective value.

    Returns
    -------
    K : numpy.array, 3-dimensional (T x d x d)
        Solution to the problem for each time t=1...T .
    history : list
        If return_history, then also a structure that contains the
        objective value, the primal and dual residual norms, and tolerances
        for the primal and dual residual norms at each iteration.

    """
    psi, prox_psi, psi_node_penalty = check_norm_prox(psi)

    if loss == 'LL':
        loss_func = neg_logl
    else:
        loss_func = dtrace

    T = S.shape[0]
    I = np.eye(S.shape[1])

    Z_0 = K_init

    out_obj = []

    checks = [convergence(obj=penalty_objective(Z_0, Z_0, Z_0, psi, theta))]

    def _Z_0(x1, x2, Z_0, loss_res, nabla_con, nabla_pen):
        A = Z_0 - x2 * (1 - theta) * nabla_pen
        # A = Z_0 - x1 * nabla_con - x2 * (1 - theta) * nabla_pen
        A -= x1 * loss_res[:, None, None] * nabla_con
        return soft_thresholding_od(A, lamda=x2 * theta), A

    # constrained optimisation via line search
    def _f(x, _Z_0, Z_0, loss_res, nabla_con, nabla_pen, loss_func, S, C):
        _Z_0, A = _Z_0(x[0], x[1], Z_0, loss_res, nabla_con, nabla_pen)
        loss_res = loss_gen(loss_func, S, _Z_0) - C
        # loss_res_A = loss_gen(loss_func, S, A) - C
        # return squared_norm(loss_res) + squared_norm(loss_res - loss_res_A)
        return squared_norm(loss_res) + \
            squared_norm(_Z_0 - A) / (S.shape[1] * S.shape[2])

    loss_res = loss_gen(loss_func, S, Z_0) - C

    for iteration_ in range(max_iter):
        if loss_func.__name__ == 'neg_logl':
            nabla_con = np.array(
                [S_t - np.linalg.inv(A_t) for (S_t, A_t) in zip(S, Z_0)])
            # nabla = np.array([S_t - np.linalg.inv(Z_0_t)
            #                   for (S_t, Z_0_t) in zip(S, Z_0_pre)])
        elif loss_func.__name__ == 'dtrace':
            nabla_con = np.array(
                [(2 * A_t @ S_t - I) for (S_t, A_t) in zip(S, Z_0)])
            # nabla = np.array([(2 * Z_0_t @ S_t - I)
            #                   for (S_t, Z_0_t) in zip(S, Z_0_pre)])
        nabla_pen = grad_laplacian(Z_0)

        out = minimize(
            partial(_f, _Z_0=_Z_0, Z_0=Z_0, loss_res=loss_res,
                    nabla_con=nabla_con, nabla_pen=nabla_pen,
                    loss_func=loss_func, S=S, C=C),
            x0=np.zeros(2), method='Nelder-Mead', tol=1e-4)
        Z_0, _ = _Z_0(out.x[0], out.x[1], Z_0, loss_res, nabla_con, nabla_pen)
        loss_res = loss_gen(loss_func, S, Z_0) - C

        out_obj.append(penalty_objective(Z_0, Z_0[:-1], Z_0[1:], psi, theta))
        if not iteration_ % 100:
            print(iteration_)
            print(np.max(loss_res), np.mean(loss_res))
            print(out_obj[-1])
            # print(out_obj[-1], np.max(loss_res), np.mean(loss_res))
    else:
        warnings.warn("Objective did not converge.")
        print(iteration_, out_obj[-1])
        # print(check.rnorm, check.e_pri)
        # print(check.snorm, check.e_dual)

    covariance_ = np.array([linalg.pinvh(x) for x in Z_0])
    return_list = [Z_0, covariance_]
    if return_history:
        return_list.append(checks)
    if return_n_iter:
        return_list.append(iteration_ + 1)
    return return_list
def latent_time_graphical_lasso(
        emp_cov, alpha=0.01, tau=1., rho=1., beta=1., eta=1., max_iter=100,
        n_samples=None, verbose=False, psi='laplacian', phi='laplacian',
        mode='admm', tol=1e-4, rtol=1e-4, return_history=False,
        return_n_iter=True, update_rho_options=None, compute_objective=True,
        init='empirical'):
    r"""Latent variable time-varying graphical lasso solver.

    Solves the following problem via ADMM:
        min sum_{i=1}^T -n_i log_likelihood(S_i, K_i-L_i)
            + alpha ||K_i||_{od,1} + tau ||L_i||_*
            + beta sum_{i=2}^T Psi(K_i - K_{i-1})
            + eta sum_{i=2}^T Phi(L_i - L_{i-1})

    where S_i = (1/n_i) X_i^T \times X_i is the empirical covariance of data
    matrix X (training observations by features).

    Parameters
    ----------
    emp_cov : ndarray, shape (n_times, n_features, n_features)
        Empirical covariance of data, one matrix per time point.
    alpha, tau, beta, eta : float, optional
        Regularisation parameters.
    rho : float, optional
        Augmented Lagrangian parameter.
    max_iter : int, optional
        Maximum number of iterations.
    n_samples : ndarray
        Number of samples available for each time point.
    tol : float, optional
        Absolute tolerance for convergence.
    rtol : float, optional
        Relative tolerance for convergence.
    return_history : bool, optional
        Return the history of computed values.
    return_n_iter : bool, optional
        Return the number of iterations before convergence.
    verbose : bool, default False
        Print info at each iteration.
    update_rho_options : dict, optional
        Arguments for the rho update. See regain.update_rules.update_rho
        function for more information.
    compute_objective : bool, default True
        Choose to compute the objective value.
    init : {'empirical', 'zeros', ndarray}, default 'empirical'
        How to initialise the inverse covariance matrix. The default is to
        invert the empirical covariance.

    Returns
    -------
    K, L : numpy.array, 3-dimensional (T x d x d)
        Solution to the problem for each time t=1...T .
    history : list
        If return_history, then also a structure that contains the
        objective value, the primal and dual residual norms, and tolerances
        for the primal and dual residual norms at each iteration.

    """
    psi, prox_psi, psi_node_penalty = check_norm_prox(psi)
    phi, prox_phi, phi_node_penalty = check_norm_prox(phi)

    Z_0 = init_precision(emp_cov, mode=init)
    Z_1 = Z_0.copy()[:-1]
    Z_2 = Z_0.copy()[1:]
    W_0 = np.zeros_like(Z_0)
    W_1 = np.zeros_like(Z_1)
    W_2 = np.zeros_like(Z_2)

    X_0 = np.zeros_like(Z_0)
    X_1 = np.zeros_like(Z_1)
    X_2 = np.zeros_like(Z_2)
    U_1 = np.zeros_like(W_1)
    U_2 = np.zeros_like(W_2)

    R_old = np.zeros_like(Z_0)
    Z_1_old = np.zeros_like(Z_1)
    Z_2_old = np.zeros_like(Z_2)
    W_1_old = np.zeros_like(W_1)
    W_2_old = np.zeros_like(W_2)

    # divisor for consensus variables, accounting for two fewer matrices
    # at the endpoints
    divisor = np.full(emp_cov.shape[0], 3, dtype=float)
    divisor[0] -= 1
    divisor[-1] -= 1

    if n_samples is None:
        n_samples = np.ones(emp_cov.shape[0])

    checks = []
    for iteration_ in range(max_iter):
        # update R
        A = Z_0 - W_0 - X_0
        A += A.transpose(0, 2, 1)
        A /= 2.
        A *= -rho / n_samples[:, None, None]
        A += emp_cov
        # A = emp_cov / rho - A
        R = np.array(
            [prox_logdet(a, lamda=ni / rho) for a, ni in zip(A, n_samples)])

        # update Z_0
        A = R + W_0 + X_0
        A[:-1] += Z_1 - X_1
        A[1:] += Z_2 - X_2
        A /= divisor[:, None, None]
        # soft_thresholding_ = partial(soft_thresholding, lamda=alpha / rho)
        # Z_0 = np.array(map(soft_thresholding_, A))
        Z_0 = soft_thresholding(
            A, lamda=alpha / (rho * divisor[:, None, None]))

        # update Z_1, Z_2
        A_1 = Z_0[:-1] + X_1
        A_2 = Z_0[1:] + X_2
        if not psi_node_penalty:
            prox_e = prox_psi(A_2 - A_1, lamda=2. * beta / rho)
            Z_1 = .5 * (A_1 + A_2 - prox_e)
            Z_2 = .5 * (A_1 + A_2 + prox_e)
        else:
            Z_1, Z_2 = prox_psi(np.concatenate((A_1, A_2), axis=1),
                                lamda=.5 * beta / rho, rho=rho,
                                tol=tol, rtol=rtol, max_iter=max_iter)

        # update W_0
        A = Z_0 - R - X_0
        A[:-1] += W_1 - U_1
        A[1:] += W_2 - U_2
        A /= divisor[:, None, None]
        A += A.transpose(0, 2, 1)
        A /= 2.
        W_0 = np.array([
            prox_trace_indicator(a, lamda=tau / (rho * div))
            for a, div in zip(A, divisor)
        ])

        # update W_1, W_2
        A_1 = W_0[:-1] + U_1
        A_2 = W_0[1:] + U_2
        if not phi_node_penalty:
            prox_e = prox_phi(A_2 - A_1, lamda=2. * eta / rho)
            W_1 = .5 * (A_1 + A_2 - prox_e)
            W_2 = .5 * (A_1 + A_2 + prox_e)
        else:
            W_1, W_2 = prox_phi(np.concatenate((A_1, A_2), axis=1),
                                lamda=.5 * eta / rho, rho=rho,
                                tol=tol, rtol=rtol, max_iter=max_iter)

        # update residuals
        X_0 += R - Z_0 + W_0
        X_1 += Z_0[:-1] - Z_1
        X_2 += Z_0[1:] - Z_2
        U_1 += W_0[:-1] - W_1
        U_2 += W_0[1:] - W_2

        # diagnostics, reporting, termination checks
        rnorm = np.sqrt(
            squared_norm(R - Z_0 + W_0) + squared_norm(Z_0[:-1] - Z_1) +
            squared_norm(Z_0[1:] - Z_2) + squared_norm(W_0[:-1] - W_1) +
            squared_norm(W_0[1:] - W_2))
        snorm = rho * np.sqrt(
            squared_norm(R - R_old) + squared_norm(Z_1 - Z_1_old) +
            squared_norm(Z_2 - Z_2_old) + squared_norm(W_1 - W_1_old) +
            squared_norm(W_2 - W_2_old))

        obj = objective(emp_cov, n_samples, R, Z_0, Z_1, Z_2, W_0, W_1, W_2,
                        alpha, tau, beta, eta, psi, phi) \
            if compute_objective else np.nan

        check = convergence(
            obj=obj, rnorm=rnorm, snorm=snorm,
            e_pri=np.sqrt(R.size + 4 * Z_1.size) * tol + rtol * max(
                np.sqrt(
                    squared_norm(R) + squared_norm(Z_1) + squared_norm(Z_2) +
                    squared_norm(W_1) + squared_norm(W_2)),
                np.sqrt(
                    squared_norm(Z_0 - W_0) + squared_norm(Z_0[:-1]) +
                    squared_norm(Z_0[1:]) + squared_norm(W_0[:-1]) +
                    squared_norm(W_0[1:]))),
            e_dual=np.sqrt(R.size + 4 * Z_1.size) * tol + rtol * rho *
            (np.sqrt(
                squared_norm(X_0) + squared_norm(X_1) + squared_norm(X_2) +
                squared_norm(U_1) + squared_norm(U_2))))

        R_old = R.copy()
        Z_1_old = Z_1.copy()
        Z_2_old = Z_2.copy()
        W_1_old = W_1.copy()
        W_2_old = W_2.copy()

        if verbose:
            print("obj: %.4f, rnorm: %.4f, snorm: %.4f, "
                  "eps_pri: %.4f, eps_dual: %.4f" % check[:5])

        checks.append(check)
        if check.rnorm <= check.e_pri and check.snorm <= check.e_dual:
            break
        rho_new = update_rho(rho, rnorm, snorm, iteration=iteration_,
                             **(update_rho_options or {}))
        # scaled dual variables should also be rescaled
        X_0 *= rho / rho_new
        X_1 *= rho / rho_new
        X_2 *= rho / rho_new
        U_1 *= rho / rho_new
        U_2 *= rho / rho_new
        rho = rho_new
    else:
        warnings.warn("Objective did not converge.")

    covariance_ = np.array([linalg.pinvh(x) for x in Z_0])
    return_list = [Z_0, W_0, covariance_]
    if return_history:
        return_list.append(checks)
    if return_n_iter:
        return_list.append(iteration_)
    return return_list
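
# Example (sketch): recover time-varying precision and latent matrices from a
# stack of empirical covariances. Sizes and penalties are illustrative, and
# the default ``init='empirical'`` relies on the module-level
# ``init_precision`` helper.
def _example_latent_time_graphical_lasso():
    rng = np.random.RandomState(0)
    T, d = 3, 8
    emp_cov = np.array([np.cov(rng.randn(60, d), rowvar=False)
                        for _ in range(T)])
    K, L, covariance_, n_iter = latent_time_graphical_lasso(
        emp_cov, alpha=0.1, tau=0.1, beta=1., eta=1., max_iter=200,
        compute_objective=False)
    return K, L, n_iter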
def lasso_kernel_admm(K, y, lamda=0.01, rho=1., max_iter=100, verbose=0,
                      rtol=1e-4, tol=1e-4, return_n_iter=True,
                      update_rho_options=None, sample_weight=None):
    """Lasso kernel learning.

    Solve the following problem via ADMM:
        min ||Y - sum_{k=1}^{n_k} w_k * K_k||_F^2 + lamda ||w||_1

    where Y = y y^T.
    """
    n_kernels, n_samples, n_features = K.shape
    coef = np.ones(n_kernels)

    # alpha = [np.zeros(K[j].shape[2]) for j in range(n_patients)]
    # u = [np.zeros(K[j].shape[1]) for j in range(n_patients)]
    w_1 = coef.copy()
    u_1 = np.zeros(n_kernels)

    # x_old = [np.zeros(K[0].shape[1]) for j in range(n_patients)]
    w_1_old = w_1.copy()

    Y = y[:, None].dot(y[:, None].T)

    checks = []
    for iteration_ in range(max_iter):
        # update w
        KK = 2 * np.tensordot(K, K.T, axes=([1, 2], [0, 1]))
        yy = 2 * np.tensordot(Y, K, axes=([0, 1], [1, 2]))
        yy += rho * (w_1 - u_1)
        coef = _solve_cholesky_kernel(KK, yy[..., None], rho).ravel()

        w_1 = soft_thresholding(coef + u_1, lamda / rho)
        # w_2 = prox_laplacian(coef + u_2, beta / rho)

        u_1 += coef - w_1

        # diagnostics, reporting, termination checks
        rnorm = np.sqrt(squared_norm(coef - w_1))
        snorm = rho * np.sqrt(squared_norm(w_1 - w_1_old))

        obj = lasso_objective(Y, coef, K, w_1, lamda)
        check = convergence(
            obj=obj, rnorm=rnorm, snorm=snorm,
            e_pri=np.sqrt(coef.size) * tol + rtol * max(
                np.sqrt(squared_norm(coef)), np.sqrt(squared_norm(w_1))),
            e_dual=np.sqrt(coef.size) * tol + rtol * rho *
            (np.sqrt(squared_norm(u_1))))

        w_1_old = w_1.copy()

        if verbose:
            print("obj: %.4f, rnorm: %.4f, snorm: %.4f, "
                  "eps_pri: %.4f, eps_dual: %.4f" % check)

        checks.append(check)
        if check.rnorm <= check.e_pri and check.snorm <= check.e_dual \
                and iteration_ > 1:
            break
        rho_new = update_rho(rho, rnorm, snorm, iteration=iteration_,
                             **(update_rho_options or {}))
        # scaled dual variables should also be rescaled
        u_1 *= rho / rho_new
        rho = rho_new
    else:
        warnings.warn("Objective did not converge.")

    return_list = [coef]
    if return_n_iter:
        return_list.append(iteration_)
    return return_list
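
# Example (sketch): learn a sparse combination of kernels for one output
# vector y. The module-level ``lasso_objective`` helper is assumed; shapes are
# illustrative (each K[k] is an (n x n) PSD kernel on n samples).
def _example_lasso_kernel_admm():
    rng = np.random.RandomState(0)
    n_kernels, n = 5, 30
    K = np.array([x.dot(x.T) for x in rng.randn(n_kernels, n, n)])
    y = rng.randn(n)
    coef, n_iter = lasso_kernel_admm(K, y, lamda=0.1)
    return coef, n_iter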
def inequality_time_graphical_lasso(
        S, K_init, max_iter, loss, C, theta, c_prox,
        rho, div, psi, gamma, tol, rtol, verbose,
        return_history, return_n_iter, mode, compute_objective,
        stop_at, stop_when, update_rho_options, init):
    """Inequality constrained time-varying graphical LASSO solver.

    Solves the following problem via ADMM:
        min sum_{i=1}^T ||K_i||_{od,1} + beta sum_{i=2}^T Psi(K_i - K_{i-1})
        s.t. objective <= c_i for i = 1, ..., T

    where S_i = (1/n_i) X_i^T X_i is the empirical covariance of data
    matrix X (training observations by features).

    Parameters
    ----------
    S : ndarray, shape (n_times, n_features, n_features)
        Empirical covariance of data, one matrix per time point.
    K_init : ndarray, shape (n_times, n_features, n_features)
        Initial guess for the precision matrices.
    max_iter : int
        Maximum number of iterations.
    loss : str
        If 'LL', use the negative log-likelihood loss; otherwise the
        D-trace loss.
    C : ndarray, shape (n_times,)
        Inequality constraint bounds c_i, one per time point.
    theta : float
        Trade-off between the l1 penalty (theta) and the temporal
        penalty (1 - theta).
    c_prox : {'cvx', 'grad'}
        Strategy used to restore feasibility of infeasible time points.
    rho : float
        Augmented Lagrangian parameter.
    div : float
        Parameter passed to the 'cvx' feasibility correction.
    gamma : float, optional
        Kernel parameter when psi is chosen to be 'kernel'.
    tol : float
        Absolute tolerance for convergence.
    rtol : float
        Relative tolerance for convergence.
    return_history : bool
        Return the history of computed values.
    return_n_iter : bool
        Return the number of iterations before convergence.
    verbose : bool
        Print info at each iteration.
    update_rho_options : dict
        Arguments for the rho update. See regain.update_rules.update_rho
        function for more information.
    compute_objective : bool
        Choose to compute the objective value.
    init : {'empirical', 'zero', ndarray}
        Choose how to initialize the precision matrix, with the inverse
        empirical covariance, zero matrix or precomputed.

    Returns
    -------
    K : numpy.array, 3-dimensional (T x d x d)
        Solution to the problem for each time t=1...T .
    history : list
        If return_history, then also a structure that contains the
        objective value, the primal and dual residual norms, and tolerances
        for the primal and dual residual norms at each iteration.

    """
    psi, prox_psi, psi_node_penalty = check_norm_prox(psi)
    psi_name = psi.__name__

    if loss == 'LL':
        loss_function = neg_logl
    else:
        loss_function = dtrace

    Z_0 = K_init  # init_precision(S, mode=init)
    Z_1 = Z_0.copy()[:-1]
    Z_2 = Z_0.copy()[1:]

    U_1 = np.zeros_like(Z_1)
    U_2 = np.zeros_like(Z_2)

    Z_0_old = np.zeros_like(Z_0)
    Z_1_old = np.zeros_like(Z_1)
    Z_2_old = np.zeros_like(Z_2)

    # divisor for consensus variables, accounting for one less matrix
    # for t = 0 and t = T
    divisor = np.full(S.shape[0], 2, dtype=float)
    divisor[0] -= 1
    divisor[-1] -= 1

    out_obj = []

    checks = [convergence(obj=penalty_objective(Z_0, Z_1, Z_2, psi, theta))]

    for iteration_ in range(max_iter):
        A_K_pen = np.zeros_like(Z_0)
        A_K_pen[:-1] += Z_1 - U_1
        A_K_pen[1:] += Z_2 - U_2
        A_K_pen += A_K_pen.transpose(0, 2, 1)
        A_K_pen /= 2.

        Z_0 = soft_thresholding_od(A_K_pen / divisor[:, None, None],
                                   lamda=theta / (rho * divisor))

        # check feasibility and perform line search if necessary
        losses_all = loss_gen(loss_function, S, Z_0)
        feasibility_check = losses_all > C
        infeasible_indices = list(
            compress(range(len(feasibility_check)), feasibility_check))

        for i in infeasible_indices:
            if c_prox == 'cvx':
                Z_0[i], loss_i = prox_cvx(loss_function, S[i], Z_0[i],
                                          Z_0_old[i], C[i], div)
            elif c_prox == 'grad':
                # both branches of the original i > 0 / else split called
                # prox_grad with identical arguments
                Z_0[i], loss_i = prox_grad(loss_function, S[i], Z_0[i],
                                           Z_0_old[i], C[i], 0.)

        # break if losses post-correction blow up
        losses_all_new = loss_gen(loss_function, S, Z_0)
        if np.inf in losses_all_new:
            print(iteration_, 'Inf')
            covariance_ = np.array([linalg.pinvh(x) for x in Z_0_old])
            return_list = [Z_0_old, covariance_]
            if return_history:
                return_list.append(checks)
            if return_n_iter:
                return_list.append(iteration_)
            return return_list

        # other Zs
        A_1 = Z_0[:-1] + U_1
        A_2 = Z_0[1:] + U_2
        if not psi_node_penalty:
            prox_e = prox_psi(A_2 - A_1, lamda=2. * (1 - theta) / rho)
            Z_1 = .5 * (A_1 + A_2 - prox_e)
            Z_2 = .5 * (A_1 + A_2 + prox_e)
        else:
            Z_1, Z_2 = prox_psi(np.concatenate((A_1, A_2), axis=1),
                                lamda=.5 * (1 - theta) / rho, rho=rho,
                                tol=tol, rtol=rtol, max_iter=max_iter)

        # update residuals
        U_1 += Z_0[:-1] - Z_1
        U_2 += Z_0[1:] - Z_2

        # diagnostics, reporting, termination checks
        rnorm = np.sqrt(
            squared_norm(Z_0[:-1] - Z_1) + squared_norm(Z_0[1:] - Z_2))
        snorm = rho * np.sqrt(
            squared_norm(Z_1 - Z_1_old) + squared_norm(Z_2 - Z_2_old))
        obj = penalty_objective(Z_0, Z_1, Z_2, psi, theta)

        check = convergence(
            obj=obj, rnorm=rnorm, snorm=snorm,
            e_pri=np.sqrt(losses_all_new.size + 2 * Z_1.size) * tol + rtol * (
                max(np.sqrt(squared_norm(losses_all_new)),
                    np.sqrt(squared_norm(C))) +
                max(np.sqrt(squared_norm(Z_1)),
                    np.sqrt(squared_norm(Z_0[:-1]))) +
                max(np.sqrt(squared_norm(Z_2)),
                    np.sqrt(squared_norm(Z_0[1:])))),
            e_dual=np.sqrt(2 * Z_1.size) * tol + rtol * rho *
            np.sqrt(squared_norm(U_1) + squared_norm(U_2)))

        Z_0_old = Z_0.copy()
        Z_1_old = Z_1.copy()
        Z_2_old = Z_2.copy()

        if verbose:
            print("obj: %.4f, rnorm: %.4f, snorm: %.4f, "
                  "eps_pri: %.4f, eps_dual: %.4f" % check[:5])

        out_obj.append(penalty_objective(Z_0, Z_0[:-1], Z_0[1:], psi, theta))
        checks.append(check)

        # if len(out_obj) > 100 and c_prox == 'grad':
        #     if (np.mean(out_obj[-11:-1]) - np.mean(out_obj[-10:])) < stop_when:
        #         print('obj break')
        #         break

        if stop_at is not None:
            if abs(check.obj - stop_at) / abs(stop_at) < stop_when:
                break
        if check.rnorm <= check.e_pri and check.snorm <= check.e_dual:
            break

        # rho_new = update_rho(
        #     rho, rnorm, snorm, iteration=iteration_,
        #     mu=1e2, tau_inc=1.01, tau_dec=1.01)
        # #     **(update_rho_options or {}))
        # # scaled dual variables should also be rescaled
        # U_1 *= rho / rho_new
        # U_2 *= rho / rho_new
        # rho = rho_new
    else:
        warnings.warn("Objective did not converge.")
        print(iteration_, out_obj[-1])
        # print(out_obj)
        print(check.rnorm, check.e_pri)
        print(check.snorm, check.e_dual)

    covariance_ = np.array([linalg.pinvh(x) for x in Z_0])
    return_list = [Z_0, covariance_]
    if return_history:
        return_list.append(checks)
    if return_n_iter:
        return_list.append(iteration_ + 1)
    return return_list
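
# Example (sketch): inequality-constrained run. ``loss_gen``, ``neg_logl`` and
# the ``prox_grad`` corrector are assumed module-level helpers; the slack added
# to C keeps the initial point feasible. All settings are illustrative.
def _example_inequality_tgl():
    rng = np.random.RandomState(0)
    T, d = 4, 5
    S = np.array([np.cov(rng.randn(50, d), rowvar=False) for _ in range(T)])
    K_init = np.array([np.eye(d)] * T)
    C = loss_gen(neg_logl, S, K_init) + 0.1  # upper bounds c_i
    return inequality_time_graphical_lasso(
        S, K_init, max_iter=200, loss='LL', C=C, theta=0.5, c_prox='grad',
        rho=1., div=2., psi='laplacian', gamma=0.1, tol=1e-4, rtol=1e-4,
        verbose=False, return_history=False, return_n_iter=True, mode=None,
        compute_objective=True, stop_at=None, stop_when=1e-4,
        update_rho_options=None, init='empirical')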
def enet_kernel_learning_admm2(
        K, y, lamda=0.01, beta=0.01, rho=1., max_iter=100, verbose=0,
        rtol=1e-4, tol=1e-4, return_n_iter=True, update_rho_options=None):
    """Elastic Net kernel learning.

    Solve the following problem via ADMM:
        min sum_{i=1}^p 1/2 ||y_i - alpha_i * sum_{k=1}^{n_k} w_k * K_{ik}||^2
            + lamda ||w||_1 + beta sum_{j=1}^{c_i} ||alpha_j||_2^2
    """
    n_patients = len(K)
    n_kernels = len(K[0])
    coef = np.ones(n_kernels)
    alpha = [np.zeros(K[j].shape[2]) for j in range(n_patients)]
    u = [np.zeros(K[j].shape[1]) for j in range(n_patients)]
    u_1 = np.zeros(n_kernels)
    w_1 = np.zeros(n_kernels)

    x_old = [np.zeros(K[0].shape[1]) for j in range(n_patients)]
    w_1_old = w_1.copy()
    # w_2_old = w_2.copy()

    checks = []
    for iteration_ in range(max_iter):
        # update x
        A = [K[j].T.dot(coef) for j in range(n_patients)]
        x = [prox_laplacian(y[j] + rho * (A[j].T.dot(alpha[j]) - u[j]),
                            rho / 2.) for j in range(n_patients)]

        # update alpha
        # solve (AtA + 2I)^-1 (Aty) with A = wK
        KK = [rho * A[j].dot(A[j].T) for j in range(n_patients)]
        yy = [rho * A[j].dot(x[j] + u[j]) for j in range(n_patients)]
        alpha = [_solve_cholesky_kernel(KK[j], yy[j][..., None],
                                        2 * beta).ravel()
                 for j in range(n_patients)]

        # equivalent to alpha_dot_K
        # solve (sum(AtA) + 2*rho I)^-1 (sum(Aty) + rho(w1+w2-u1-u2))
        # with A = K * alpha
        A = [K[j].dot(alpha[j]) for j in range(n_patients)]
        KK = sum(A[j].dot(A[j].T) for j in range(n_patients))
        yy = sum(A[j].dot(x[j] + u[j]) for j in range(n_patients))
        yy += w_1 - u_1
        coef = _solve_cholesky_kernel(KK, yy[..., None], 1).ravel()

        w_1 = soft_thresholding(coef + u_1, lamda / rho)
        # w_2 = prox_laplacian(coef + u_2, beta / rho)

        # update residuals
        alpha_coef_K = [alpha[j].dot(K[j].T.dot(coef))
                        for j in range(n_patients)]
        residuals = [x[j] - alpha_coef_K[j] for j in range(n_patients)]
        u = [u[j] + residuals[j] for j in range(n_patients)]
        u_1 += coef - w_1

        # diagnostics, reporting, termination checks
        rnorm = np.sqrt(
            squared_norm(coef - w_1) +
            sum(squared_norm(residuals[j]) for j in range(n_patients)))
        snorm = rho * np.sqrt(
            squared_norm(w_1 - w_1_old) +
            sum(squared_norm(x[j] - x_old[j]) for j in range(n_patients)))

        obj = objective_admm2(x, y, alpha, lamda, beta, w_1)
        check = convergence(
            obj=obj, rnorm=rnorm, snorm=snorm,
            e_pri=np.sqrt(coef.size + sum(
                x[j].size for j in range(n_patients))) * tol + rtol * max(
                    np.sqrt(squared_norm(coef) + sum(squared_norm(
                        alpha_coef_K[j]) for j in range(n_patients))),
                    np.sqrt(squared_norm(w_1) + sum(squared_norm(
                        x[j]) for j in range(n_patients)))),
            e_dual=np.sqrt(coef.size + sum(
                x[j].size for j in range(n_patients))) * tol + rtol * rho * (
                    np.sqrt(squared_norm(u_1) + sum(squared_norm(
                        u[j]) for j in range(n_patients)))))

        w_1_old = w_1.copy()
        x_old = [x[j].copy() for j in range(n_patients)]

        if verbose:
            print("obj: %.4f, rnorm: %.4f, snorm: %.4f, "
                  "eps_pri: %.4f, eps_dual: %.4f" % check)

        checks.append(check)
        if check.rnorm <= check.e_pri and check.snorm <= check.e_dual \
                and iteration_ > 1:
            break
        rho_new = update_rho(rho, rnorm, snorm, iteration=iteration_,
                             **(update_rho_options or {}))
        # scaled dual variables should also be rescaled
        u = [u[j] * (rho / rho_new) for j in range(n_patients)]
        u_1 *= rho / rho_new
        rho = rho_new
    else:
        warnings.warn("Objective did not converge.")

    return_list = [alpha, coef]
    if return_n_iter:
        return_list.append(iteration_)
    return return_list
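
# Example (sketch): per-patient kernels with a shared sparse kernel weight
# vector. The module-level ``objective_admm2`` helper is assumed; each K[j]
# is a stack of n_kernels square kernel matrices for patient j.
def _example_enet_kernel_learning_admm2():
    rng = np.random.RandomState(0)
    n_patients, n_kernels, n = 3, 4, 20
    K = [np.array([k.dot(k.T) for k in rng.randn(n_kernels, n, n)])
         for _ in range(n_patients)]
    y = [rng.randn(n) for _ in range(n_patients)]
    alpha, coef, n_iter = enet_kernel_learning_admm2(K, y, lamda=0.1,
                                                     beta=0.1)
    return alpha, coef, n_iter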
def enet_kernel_learning_admm(
        K, y, lamda=0.01, beta=0.01, rho=1., max_iter=100, verbose=0,
        rtol=1e-4, tol=1e-4, return_n_iter=True, update_rho_options=None):
    """Elastic Net kernel learning.

    Solve the following problem via ADMM:
        min sum_{i=1}^p 1/2 ||alpha_i * w * K_i - y_i||^2
            + lamda ||w||_1 + beta ||w||_2^2
    """
    n_patients = len(K)
    n_kernels = len(K[0])
    coef = np.ones(n_kernels)
    u_1 = np.zeros(n_kernels)
    u_2 = np.zeros(n_kernels)
    w_1 = np.zeros(n_kernels)
    w_2 = np.zeros(n_kernels)

    w_1_old = w_1.copy()
    w_2_old = w_2.copy()

    checks = []
    for iteration_ in range(max_iter):
        # update alpha
        # solve (AtA + 2I)^-1 (Aty) with A = wK
        A = [K[j].T.dot(coef) for j in range(n_patients)]
        KK = [A[j].dot(A[j].T) for j in range(n_patients)]
        yy = [y[j].dot(A[j]) for j in range(n_patients)]
        alpha = [
            _solve_cholesky_kernel(KK[j], yy[j][..., None], 2).ravel()
            for j in range(n_patients)
        ]
        # alpha = [_solve_cholesky_kernel(
        #     K_dot_coef[j], y[j][..., None], 0).ravel()
        #     for j in range(n_patients)]

        w_1 = soft_thresholding(coef + u_1, lamda / rho)
        w_2 = prox_laplacian(coef + u_2, beta / rho)

        # equivalent to alpha_dot_K
        # solve (sum(AtA) + 2*rho I)^-1 (sum(Aty) + rho(w1+w2-u1-u2))
        # with A = K * alpha
        A = [K[j].dot(alpha[j]) for j in range(n_patients)]
        KK = sum(A[j].dot(A[j].T) for j in range(n_patients))
        yy = sum(y[j].dot(A[j].T) for j in range(n_patients))
        yy += rho * (w_1 + w_2 - u_1 - u_2)
        coef = _solve_cholesky_kernel(KK, yy[..., None], 2 * rho).ravel()

        # update residuals
        u_1 += coef - w_1
        u_2 += coef - w_2

        # diagnostics, reporting, termination checks
        rnorm = np.sqrt(squared_norm(coef - w_1) + squared_norm(coef - w_2))
        snorm = rho * np.sqrt(
            squared_norm(w_1 - w_1_old) + squared_norm(w_2 - w_2_old))

        obj = objective_admm(K, y, alpha, lamda, beta, coef, w_1, w_2)
        check = convergence(
            obj=obj, rnorm=rnorm, snorm=snorm,
            e_pri=np.sqrt(2 * coef.size) * tol + rtol * max(
                np.sqrt(squared_norm(coef) + squared_norm(coef)),
                np.sqrt(squared_norm(w_1) + squared_norm(w_2))),
            e_dual=np.sqrt(2 * coef.size) * tol + rtol * rho *
            (np.sqrt(squared_norm(u_1) + squared_norm(u_2))))

        w_1_old = w_1.copy()
        w_2_old = w_2.copy()

        if verbose:
            print("obj: %.4f, rnorm: %.4f, snorm: %.4f, "
                  "eps_pri: %.4f, eps_dual: %.4f" % check)

        checks.append(check)
        if check.rnorm <= check.e_pri and check.snorm <= check.e_dual \
                and iteration_ > 1:
            break
        rho_new = update_rho(rho, rnorm, snorm, iteration=iteration_,
                             **(update_rho_options or {}))
        # scaled dual variables should also be rescaled
        u_1 *= rho / rho_new
        u_2 *= rho / rho_new
        rho = rho_new
    else:
        warnings.warn("Objective did not converge.")

    return_list = [alpha, coef]
    if return_n_iter:
        return_list.append(iteration_)
    return return_list
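
# Example (sketch): same data layout as the admm2 variant above, with the
# elastic-net split on ``coef`` (w_1 for the l1 term, w_2 for the l2 term).
# The module-level ``objective_admm`` helper is assumed.
def _example_enet_kernel_learning_admm():
    rng = np.random.RandomState(0)
    n_patients, n_kernels, n = 3, 4, 20
    K = [np.array([k.dot(k.T) for k in rng.randn(n_kernels, n, n)])
         for _ in range(n_patients)]
    y = [rng.randn(n) for _ in range(n_patients)]
    alpha, coef, n_iter = enet_kernel_learning_admm(K, y, lamda=0.1, beta=0.1)
    return alpha, coef, n_iter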
def latent_time_graph_lasso(
        emp_cov, alpha=1, tau=1, rho=1, beta=1., eta=1., max_iter=1000,
        verbose=False, psi='laplacian', phi='laplacian', mode=None,
        tol=1e-4, rtol=1e-2, assume_centered=False,
        return_history=False, return_n_iter=True):
    r"""Time-varying latent variable graphical lasso solver.

    Solves the following problem via ADMM:
        min sum_{i=1}^T -n_i log_likelihood(K_i-L_i) + alpha ||K_i||_{od,1}
            + tau ||L_i||_*
            + beta sum_{i=2}^T Psi(K_i - K_{i-1})
            + eta sum_{i=2}^T Phi(L_i - L_{i-1})

    where S is the empirical covariance of the data matrix D
    (training observations by features).

    Parameters
    ----------
    emp_cov : ndarray, shape (n_times, n_features, n_features)
        Empirical covariance matrices, one per time point.
    alpha, tau : float, optional
        Regularisation parameters.
    rho : float, optional
        Augmented Lagrangian parameter.
    max_iter : int, optional
        Maximum number of iterations.
    tol : float, optional
        Absolute tolerance for convergence.
    rtol : float, optional
        Relative tolerance for convergence.
    return_history : bool, optional
        Return the history of computed values.

    Returns
    -------
    K, L : numpy.array, 3-dimensional (T x d x d)
        Solution to the problem for each time t=1...T .
    history : list
        If return_history, then also a structure that contains the
        objective value, the primal and dual residual norms, and tolerances
        for the primal and dual residual norms at each iteration.

    """
    psi, prox_psi, _ = check_norm_prox(psi)
    phi, prox_phi, _ = check_norm_prox(phi)

    # S = np.array(map(empirical_covariance, data_list))
    # n_samples = np.array([s for s in [1.]])

    K = np.zeros_like(emp_cov)
    Z_0 = np.zeros_like(K)
    Z_1 = np.zeros_like(K)[:-1]
    Z_2 = np.zeros_like(K)[1:]
    W_0 = np.zeros_like(K)
    W_1 = np.zeros_like(K)[:-1]
    W_2 = np.zeros_like(K)[1:]
    X_0 = np.zeros_like(K)
    X_1 = np.zeros_like(K)[:-1]
    X_2 = np.zeros_like(K)[1:]

    Z_consensus = np.zeros_like(K)
    # Z_consensus_old = np.zeros_like(K)
    W_consensus = np.zeros_like(K)
    # W_consensus_old = np.zeros_like(K)
    R_old = np.zeros_like(K)

    # divisor for consensus variables, accounting for two fewer matrices
    # at the endpoints
    divisor = np.full(K.shape[0], 3, dtype=float)
    divisor[0] -= 1
    divisor[-1] -= 1

    checks = []
    for iteration_ in range(max_iter):
        # update R
        A = Z_0 - W_0 - X_0
        A[:-1] += Z_1 - W_1 - X_1
        A[1:] += Z_2 - W_2 - X_2
        A /= divisor[:, None, None]
        # A += np.array(map(np.transpose, A))
        # A /= 2.
        # A *= - rho / n_samples[:, None, None]
        A *= - rho
        A += emp_cov
        R = np.array([prox_logdet(a, lamda=1. / rho) for a in A])

        # update Z_0
        # Zold = Z
        # X_hat = alpha * X + (1 - alpha) * Zold
        soft_thresholding_ = partial(soft_thresholding_sign,
                                     lamda=alpha / rho)
        Z_0 = np.array([soft_thresholding_(a) for a in R + W_0 + X_0])

        # update Z_1, Z_2
        # prox_l = partial(prox_laplacian, beta=2. * beta / rho)
        # prox_e = np.array(map(prox_l, K[1:] - K[:-1] + U_2 - U_1))
        if beta != 0:
            A_1 = R[:-1] + W_1 + X_1
            # A_1 = Z_0[:-1].copy()
            A_2 = R[1:] + W_2 + X_2
            # A_2 = Z_0[1:].copy()
            prox_e = prox_psi(A_2 - A_1, lamda=2. * beta / rho)
            Z_1 = .5 * (A_1 + A_2 - prox_e)
            Z_2 = .5 * (A_1 + A_2 + prox_e)
        else:
            Z_1 = Z_0[:-1].copy()
            Z_2 = Z_0[1:].copy()

        # update W_0
        A = Z_0 - R - X_0
        W_0 = np.array([prox_trace_indicator(a, lamda=tau / rho) for a in A])

        # update W_1, W_2
        if eta != 0:
            A_1 = Z_1 - R[:-1] - X_1
            # A_1 = W_0[:-1].copy()
            A_2 = Z_2 - R[1:] - X_2
            # A_2 = W_0[1:].copy()
            prox_e = prox_phi(A_2 - A_1, lamda=2. * eta / rho)
            W_1 = .5 * (A_1 + A_2 - prox_e)
            W_2 = .5 * (A_1 + A_2 + prox_e)
        else:
            W_1 = W_0[:-1].copy()
            W_2 = W_0[1:].copy()

        # update residuals
        X_0 += R - Z_0 + W_0
        X_1 += R[:-1] - Z_1 + W_1
        X_2 += R[1:] - Z_2 + W_2

        # diagnostics, reporting, termination checks
        X_consensus = X_0.copy()
        X_consensus[:-1] += X_1
        X_consensus[1:] += X_2
        X_consensus /= divisor[:, None, None]

        Z_consensus = Z_0.copy()
        Z_consensus[:-1] += Z_1
        Z_consensus[1:] += Z_2
        Z_consensus /= divisor[:, None, None]

        W_consensus = W_0.copy()
        W_consensus[:-1] += W_1
        W_consensus[1:] += W_2
        W_consensus /= divisor[:, None, None]

        check = convergence(
            obj=objective(emp_cov, R, Z_0, Z_1, Z_2, W_0, W_1, W_2,
                          alpha, tau, beta, eta, psi, phi),
            rnorm=np.linalg.norm(R - Z_consensus + W_consensus),
            snorm=np.linalg.norm(rho * (R - R_old)),
            e_pri=np.sqrt(np.prod(K.shape)) * tol + rtol * max(
                np.linalg.norm(R),
                np.linalg.norm(Z_consensus - W_consensus)),
            e_dual=np.sqrt(np.prod(K.shape)) * tol + rtol * np.linalg.norm(
                rho * X_consensus))

        R_old = R.copy()

        if verbose:
            print("obj: %.4f, rnorm: %.4f, snorm: %.4f, "
                  "eps_pri: %.4f, eps_dual: %.4f" % check)

        checks.append(check)
        if check.rnorm <= check.e_pri and check.snorm <= check.e_dual:
            break
        # if iteration_ % 10 == 0:
        #     rho = rho * 0.8
    else:
        warnings.warn("Objective did not converge.")

    # return_list = [Z_consensus, W_consensus, emp_cov]
    return_list = [Z_consensus, W_0, W_1, W_2, emp_cov]
    if return_history:
        return_list.append(checks)
    if return_n_iter:
        return_list.append(iteration_)
    return return_list
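
# Example (sketch): the older latent time-varying solver; it returns the
# consensus Z along with the W blocks and the input covariances. The
# module-level ``objective`` helper is assumed, and sizes are illustrative.
def _example_latent_time_graph_lasso():
    rng = np.random.RandomState(0)
    T, d = 3, 6
    emp_cov = np.array([np.cov(rng.randn(40, d), rowvar=False)
                        for _ in range(T)])
    Z, W_0, W_1, W_2, S, n_iter = latent_time_graph_lasso(
        emp_cov, alpha=0.1, tau=0.1, beta=1., eta=1., max_iter=200)
    return Z, n_iter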
def taylor_time_graphical_lasso(
        S, K_init, max_iter, loss, C, theta, rho, mult,
        weights, m, eps, psi, gamma, tol, rtol, verbose,
        return_history, return_n_iter, mode, compute_objective,
        stop_at, stop_when, update_rho_options):
    """Equality constrained time-varying graphical LASSO solver.

    Solves the following problem via ADMM:
        min sum_{i=1}^T ||K_i||_{od,1} + beta sum_{i=2}^T Psi(K_i - K_{i-1})
        s.t. objective = c_i for i = 1, ..., T

    where S_i = (1/n_i) X_i^T X_i is the empirical covariance of data
    matrix X (training observations by features).

    Parameters
    ----------
    S : ndarray, shape (n_times, n_features, n_features)
        Empirical covariance of data, one matrix per time point.
    K_init : ndarray, shape (n_times, n_features, n_features)
        Initial guess for the precision matrices.
    max_iter : int
        Maximum number of iterations.
    loss : str
        If 'LL', use the negative log-likelihood loss; otherwise the
        D-trace loss.
    C : ndarray, shape (n_times,)
        Equality constraint targets c_i, one per time point.
    theta : float
        Trade-off between the l1 penalty (theta) and the temporal
        penalty (1 - theta).
    rho : float
        Augmented Lagrangian parameter (expanded to one value per time).
    mult : float
        Multiplier applied to rho when the constraint residuals stall.
    weights : list
        Weighting scheme for rho, e.g. ('rbf', param); [None] disables it.
    m : int
        Window length used to detect stalling constraint residuals.
    eps : float
        Tolerance on the constraint residuals.
    gamma : float, optional
        Kernel parameter when psi is chosen to be 'kernel'.
    tol : float
        Absolute tolerance for convergence.
    rtol : float
        Relative tolerance for convergence.
    return_history : bool
        Return the history of computed values.
    return_n_iter : bool
        Return the number of iterations before convergence.
    verbose : bool
        Print info at each iteration.
    update_rho_options : dict
        Arguments for the rho update. See regain.update_rules.update_rho
        function for more information.
    compute_objective : bool
        Choose to compute the objective value.

    Returns
    -------
    K : numpy.array, 3-dimensional (T x d x d)
        Solution to the problem for each time t=1...T .
    history : list
        If return_history, then also a structure that contains the
        objective value, the primal and dual residual norms, and tolerances
        for the primal and dual residual norms at each iteration.

    """
    psi, prox_psi, psi_node_penalty = check_norm_prox(psi)

    if loss == 'LL':
        loss_func = neg_logl
    else:
        loss_func = dtrace

    T = S.shape[0]
    S_flat = S.copy().reshape(T, S.shape[1] * S.shape[2])
    I_flat = np.eye(S.shape[1]).ravel()

    K = K_init.copy()
    Z_0 = K_init.copy()
    Z_1 = Z_0.copy()[:-1]
    Z_2 = Z_0.copy()[1:]

    u = np.zeros(T)
    U_0 = np.zeros_like(Z_0)
    U_1 = np.zeros_like(Z_1)
    U_2 = np.zeros_like(Z_2)

    Z_0_old = Z_0.copy()
    Z_1_old = np.zeros_like(Z_1)
    Z_2_old = np.zeros_like(Z_2)

    # divisor for consensus variables, accounting for one less matrix
    # for t = 0 and t = T
    divisor = np.full(T, 3, dtype=float)
    divisor[0] -= 1
    divisor[-1] -= 1

    rho = rho * np.ones(T)

    if weights[0] is not None:
        if weights[0] == 'rbf':
            weights = rbf_weights(T, weights[1], mult)
        elif weights[0] == 'exp':
            weights = exp_weights(T, weights[1], mult)
        elif weights[0] == 'lin':
            weights = lin_weights(T, weights[1], mult)

    con_obj = {}
    for t in range(T):
        con_obj[t] = []

    con_obj_mean = []
    con_obj_max = []

    # loss residuals
    loss_res = np.zeros(T)
    loss_init = loss_gen(loss_func, S, Z_0_old)
    loss_res_old = loss_init - C

    # loss_diff = C - loss_init
    # C_ = C - loss_diff

    out_obj = []

    checks = [convergence(obj=penalty_objective(Z_0, Z_1, Z_2, psi, theta))]

    def _K(x, A_t, g_t, nabla_t, nabla_t_T_A_t, nabla_t_T_nabla_t,
           rho_t, divisor_t):
        _K_t = (A_t + x * g_t * nabla_t -
                (x * nabla_t_T_A_t + x ** 2 * g_t * nabla_t_T_nabla_t) *
                nabla_t / (divisor_t * rho_t + x * nabla_t_T_nabla_t)
                ).reshape(S.shape[1], S.shape[2])
        _K_t /= (rho_t * divisor_t)
        return 0.5 * (_K_t + _K_t.transpose(1, 0))

    # def _K(x, A_t, nabla_t):
    #     _A_t = A_t - x * nabla_t
    #     return _A_t

    # constrained optimisation via line search
    def _f(x, _K, A_t, g_t, nabla_t, nabla_t_T_A_t, nabla_t_T_nabla_t,
           rho_t, divisor_t, loss_func, S_t, c_t, loss_res_old_t,
           nabla_t_T_K_old_t):
        _K_t = _K(x, A_t, g_t, nabla_t, nabla_t_T_A_t, nabla_t_T_nabla_t,
                  rho_t, divisor_t)
        loss_res_t = loss_func(S_t, _K_t) - c_t
        return loss_res_t ** 2 + (loss_res_t - loss_res_old_t -
                                  nabla_t @ _K_t.ravel() +
                                  nabla_t_T_K_old_t) ** 2

    # # constrained optimisation via line search
    # def _f(x, _K, A_t, nabla_t, loss_func, S_t, c_t, loss_res_old_t):
    #     _K_t = _K(x, A_t, nabla_t)
    #     loss_res_t = loss_func(S_t, _K_t) - c_t
    #     return loss_res_t ** 2 + (loss_res_t - loss_res_old_t -
    #                               np.sum(nabla_t * (_K_t - A_t))) ** 2

    for iteration_ in range(max_iter):
        # update K
        A = rho[:, None, None] * (Z_0 - U_0)
        A[:-1] += rho[:-1, None, None] * (Z_1 - U_1)
        A[1:] += rho[1:, None, None] * (Z_2 - U_2)
        # A += A.transpose(0, 2, 1)
        # A /= 2.
        # A /= (rho * divisor)[:, None, None]

        # loss_res_pre = loss_gen(loss_func, S, A) - C

        if loss_func.__name__ == 'neg_logl':
            nabla = np.array([S_t - np.linalg.inv(K_t).ravel()
                              for (S_t, K_t) in zip(S_flat, K)])
            # nabla = np.array([S_t - np.linalg.inv(K_t)
            #                   for (S_t, K_t) in zip(S, A)])
        elif loss_func.__name__ == 'dtrace':
            # flattened D-trace gradient, 2 K S - I
            nabla = np.array([(2 * K_t @ S_t).ravel() - I_flat
                              for (S_t, K_t) in zip(S, K)])
            # nabla = np.array([(2 * K_t @ S_t - I)
            #                   for (S_t, K_t) in zip(S, K)])
        nabla_T_K_old = np.array([nabla_t @ K_t.ravel()
                                  for (nabla_t, K_t) in zip(nabla, K)])
        # nabla_T_K_old = np.array([np.sum(nabla_t * K_t)
        #                           for (nabla_t, K_t) in zip(nabla, K)])
        g = nabla_T_K_old - loss_res_old
        nabla_T_A = np.array([nabla_t @ A_t.ravel()
                              for (nabla_t, A_t) in zip(nabla, A)])
        nabla_T_nabla = np.einsum('ij,ij->i', nabla, nabla)

        if iteration_ == 0:
            nabla = np.zeros_like(S_flat)
            # nabla = np.zeros_like(S)
            nabla_T_K_old = np.zeros(T)
            g = np.zeros(T)
            nabla_T_A = np.zeros(T)
            nabla_T_nabla = np.zeros(T)

        col = []
        for t in range(T):
            out = minimize_scalar(
                partial(_f, _K=_K, A_t=A[t].ravel(), g_t=g[t],
                        nabla_t=nabla[t], nabla_t_T_A_t=nabla_T_A[t],
                        nabla_t_T_nabla_t=nabla_T_nabla[t], rho_t=rho[t],
                        divisor_t=divisor[t], loss_func=loss_func, S_t=S[t],
                        c_t=C[t], loss_res_old_t=loss_res_old[t],
                        nabla_t_T_K_old_t=nabla_T_K_old[t]))
            # out = minimize_scalar(
            #     partial(_f, _K=_K, A_t=A[t], nabla_t=nabla[t],
            #             loss_func=loss_func, S_t=S[t], c_t=C[t],
            #             loss_res_old_t=loss_res_pre[t]))
            K[t] = _K(out.x, A[t].ravel(), g[t], nabla[t], nabla_T_A[t],
                      nabla_T_nabla[t], rho[t], divisor[t])
            # K[t] = _K(out.x, A[t], nabla[t])
            loss_res[t] = loss_func(S[t], K[t]) - C[t]
            # u[t] += loss_res[t]
            if weights[0] is not None:
                con_obj[t].append(loss_res[t] ** 2)
                if (len(con_obj[t]) > m and
                        np.mean(con_obj[t][-m:-int(m / 2)]) <
                        np.mean(con_obj[t][-int(m / 2):]) and
                        loss_res[t] > eps):
                    col.append(t)

        # update Z_0
        _Z_0 = K + U_0
        _Z_0 += _Z_0.transpose(0, 2, 1)
        _Z_0 /= 2.
        Z_0 = soft_thresholding_od(_Z_0, lamda=theta / rho[:, None, None])

        # update Z_1, Z_2
        A_1 = Z_0[:-1] + U_1
        A_2 = Z_0[1:] + U_2
        if not psi_node_penalty:
            A_add = A_2 + A_1
            A_sub = A_2 - A_1
            prox_e_1 = prox_psi(A_sub,
                                lamda=2. * (1 - theta) / rho[:-1, None, None])
            prox_e_2 = prox_psi(A_sub,
                                lamda=2. * (1 - theta) / rho[1:, None, None])
            Z_1 = .5 * (A_add - prox_e_1)
            Z_2 = .5 * (A_add + prox_e_2)
        # TODO: fix for rho vector
        # else:
        #     if weights is not None:
        #         Z_1, Z_2 = prox_psi(
        #             np.concatenate((A_1, A_2), axis=1),
        #             lamda=.5 * (1 - theta) / rho[t],
        #             rho=rho[t], tol=tol, rtol=rtol, max_iter=max_iter)

        # update residuals
        con_obj_mean.append(np.mean(loss_res) ** 2)
        con_obj_max.append(np.max(loss_res))

        U_0 += K - Z_0
        U_1 += K[:-1] - Z_1
        U_2 += K[1:] - Z_2

        # diagnostics, reporting, termination checks
        rnorm = np.sqrt(
            squared_norm(K - Z_0) + squared_norm(K[:-1] - Z_1) +
            squared_norm(K[1:] - Z_2))

        loss_res_old = loss_res.copy()

        snorm = np.sqrt(
            squared_norm(rho[:, None, None] * (Z_0 - Z_0_old)) +
            squared_norm(rho[:-1, None, None] * (Z_1 - Z_1_old)) +
            squared_norm(rho[1:, None, None] * (Z_2 - Z_2_old)))

        e_dual = np.sqrt(Z_0.size + 2 * Z_1.size) * tol + rtol * np.sqrt(
            squared_norm(rho[:, None, None] * U_0) +
            squared_norm(rho[:-1, None, None] * U_1) +
            squared_norm(rho[1:, None, None] * U_2))

        obj = objective(loss_res, Z_0, Z_1, Z_2, psi, theta)

        check = convergence(
            obj=obj, rnorm=rnorm, snorm=snorm,
            e_pri=np.sqrt(loss_res.size + Z_0.size + 2 * Z_1.size) * tol +
            rtol * (
                max(np.sqrt(squared_norm(Z_0)), np.sqrt(squared_norm(K))) +
                max(np.sqrt(squared_norm(Z_1)),
                    np.sqrt(squared_norm(K[:-1]))) +
                max(np.sqrt(squared_norm(Z_2)),
                    np.sqrt(squared_norm(K[1:])))),
            e_dual=e_dual)

        Z_0_old = Z_0.copy()
        Z_1_old = Z_1.copy()
        Z_2_old = Z_2.copy()

        if verbose:
            print("obj: %.4f, rnorm: %.4f, snorm: %.4f, "
                  "eps_pri: %.4f, eps_dual: %.4f" % check[:5])

        out_obj.append(penalty_objective(Z_0, Z_0[:-1], Z_0[1:], psi, theta))
        if not iteration_ % 100:
            print(iteration_)
            print(np.max(con_obj_max[-1]), np.mean(loss_res))
            print(out_obj[-1])

        checks.append(check)
        if stop_at is not None:
            if abs(check.obj - stop_at) / abs(stop_at) < stop_when:
                break
        if check.rnorm <= check.e_pri and check.snorm <= check.e_dual:
            break

        if weights[0] is None:
            if len(con_obj_mean) > m:
                if (np.mean(con_obj_mean[-m:-int(m / 2)]) <
                        np.mean(con_obj_mean[-int(m / 2):]) and
                        np.max(loss_res) > eps):
                    # or np.mean(con_obj_max[-100:-50]) <
                    #    np.mean(con_obj_max[-50:]))
                    # np.mean(loss_res) > 0.25:
                    print("Rho Mult", mult * rho[0], iteration_,
                          np.mean(loss_res), con_obj_max[-1])
                    # loss_diff /= 5
                    # C_ = C - loss_diff
                    # rescale scaled dual variables
                    rho = mult * rho
                    # u /= mult
                    U_0 /= mult
                    U_1 /= mult
                    U_2 /= mult
                    con_obj_mean = []
                    con_obj_max = []
        else:
            for t in col:
                rho *= weights[t]
                # u /= weights[t]
                U_0 /= weights[t][:, None, None]
                U_1 /= weights[t][:-1, None, None]
                U_2 /= weights[t][1:, None, None]
                con_obj[t] = []
                print('Mult', iteration_, t, rho[t])
    else:
        warnings.warn("Objective did not converge.")
        print(iteration_, out_obj[-1])
        # print(out_obj)
        print(check.rnorm, check.e_pri)
        print(check.snorm, check.e_dual)

    covariance_ = np.array([linalg.pinvh(x) for x in Z_0])
    return_list = [Z_0, covariance_]
    if return_history:
        return_list.append(checks)
    if return_n_iter:
        return_list.append(iteration_ + 1)
    return return_list
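
# Example (sketch): Taylor-expansion based equality-constrained run. As above,
# ``loss_gen``, ``neg_logl`` and ``objective`` are assumed module-level
# helpers, and the settings are illustrative only.
def _example_taylor_tgl():
    rng = np.random.RandomState(0)
    T, d = 4, 5
    S = np.array([np.cov(rng.randn(50, d), rowvar=False) for _ in range(T)])
    K_init = np.array([np.eye(d)] * T)
    C = loss_gen(neg_logl, S, K_init)       # equality targets c_i
    return taylor_time_graphical_lasso(
        S, K_init, max_iter=200, loss='LL', C=C, theta=0.5, rho=1., mult=2,
        weights=[None], m=10, eps=1e-4, psi='laplacian', gamma=0.1, tol=1e-4,
        rtol=1e-4, verbose=False, return_history=False, return_n_iter=True,
        mode=None, compute_objective=True, stop_at=None, stop_when=1e-4,
        update_rho_options=None)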
def graphical_lasso(
    emp_cov,
    alpha=0.01,
    rho=1,
    over_relax=1,
    max_iter=100,
    verbose=False,
    tol=1e-4,
    rtol=1e-4,
    return_history=False,
    return_n_iter=True,
    update_rho_options=None,
    compute_objective=True,
    init="empirical",
):
    r"""Graphical lasso solver via ADMM.

    Solves the following problem:
        minimize trace(S*K) - log det K + alpha ||K||_{od,1}

    where S = (1/n) X^T \times X is the empirical covariance of the data
    matrix X (training observations by features).

    Parameters
    ----------
    emp_cov : array-like
        Empirical covariance matrix.
    alpha : float, optional
        Regularisation parameter.
    rho : float, optional
        Augmented Lagrangian parameter.
    over_relax : float, optional
        Over-relaxation parameter (typically between 1.0 and 1.8).
    max_iter : int, optional
        Maximum number of iterations.
    tol : float, optional
        Absolute tolerance for convergence.
    rtol : float, optional
        Relative tolerance for convergence.
    return_history : bool, optional
        Return the history of computed values.
    return_n_iter : bool, optional
        Return the number of iterations before convergence.
    verbose : bool, default False
        Print info at each iteration.
    update_rho_options : dict, optional
        Arguments for the rho update. See regain.update_rules.update_rho
        function for more information.
    compute_objective : bool, default True
        Choose to compute the objective value.
    init : {'empirical', 'zeros', ndarray}, default 'empirical'
        How to initialise the inverse covariance matrix. The default is to
        invert the empirical covariance.

    Returns
    -------
    precision_ : numpy.array, 2-dimensional
        Solution to the problem.
    covariance_ : np.array, 2 dimensional
        Empirical covariance matrix.
    n_iter_ : int
        If return_n_iter, returns the number of iterations before convergence.
    history_ : list
        If return_history, then also a structure that contains the
        objective value, the primal and dual residual norms, and tolerances
        for the primal and dual residual norms at each iteration.

    """
    Z = init_precision(emp_cov, mode=init)
    U = np.zeros_like(emp_cov)
    Z_old = np.zeros_like(Z)

    checks = []
    for iteration_ in range(max_iter):
        # x-update
        A = Z - U
        A += A.T
        A /= 2.0
        K = prox_logdet(emp_cov - rho * A, lamda=1.0 / rho)

        # z-update with relaxation
        K_hat = over_relax * K - (1 - over_relax) * Z
        Z = soft_thresholding_od(K_hat + U, lamda=alpha / rho)

        # update residuals
        U += K_hat - Z

        # diagnostics, reporting, termination checks
        obj = objective(emp_cov, K, Z, alpha) if compute_objective else np.nan
        rnorm = np.linalg.norm(K - Z, "fro")
        snorm = rho * np.linalg.norm(Z - Z_old, "fro")
        check = convergence(
            obj=obj,
            rnorm=rnorm,
            snorm=snorm,
            e_pri=np.sqrt(K.size) * tol
            + rtol * max(np.linalg.norm(K, "fro"), np.linalg.norm(Z, "fro")),
            e_dual=np.sqrt(K.size) * tol + rtol * rho * np.linalg.norm(U),
        )
        Z_old = Z.copy()

        if verbose:
            print("obj: %.4f, rnorm: %.4f, snorm: %.4f, "
                  "eps_pri: %.4f, eps_dual: %.4f" % check[:5])

        checks.append(check)
        if check.rnorm <= check.e_pri and check.snorm <= check.e_dual:
            break
        rho_new = update_rho(rho, rnorm, snorm, iteration=iteration_,
                             **(update_rho_options or {}))
        # scaled dual variables should also be rescaled
        U *= rho / rho_new
        rho = rho_new
    else:
        warnings.warn("Objective did not converge.")

    return_list = [Z, emp_cov]
    if return_history:
        return_list.append(checks)
    if return_n_iter:
        return_list.append(iteration_)
    return return_list
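
# Example (sketch): static graphical lasso on a synthetic dataset. With the
# flags below the function returns precision, input covariance and iteration
# count; sizes and alpha are illustrative, and the default ``init``
# relies on the module-level ``init_precision`` helper.
def _example_graphical_lasso():
    rng = np.random.RandomState(0)
    X = rng.randn(200, 10)                  # 200 observations, 10 features
    emp_cov = np.cov(X, rowvar=False)
    precision, _, n_iter = graphical_lasso(
        emp_cov, alpha=0.1, compute_objective=False)
    return precision, n_iter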
def kernel_time_graphical_lasso(
    emp_cov,
    alpha=0.01,
    rho=1,
    kernel=None,
    max_iter=100,
    n_samples=None,
    verbose=False,
    psi="laplacian",
    tol=1e-4,
    rtol=1e-4,
    return_history=False,
    return_n_iter=True,
    mode="admm",
    update_rho_options=None,
    compute_objective=True,
    stop_at=None,
    stop_when=1e-4,
    init="empirical",
):
    """Time-varying graphical lasso solver.

    Solves the following problem via ADMM:
        min sum_{i=1}^T -n_i log_likelihood(S_i, K_i) + alpha ||K_i||_{od,1}
            + sum_{s>t}^T k_psi(s,t) Psi(K_s - K_t)

    where S_i is the empirical covariance of the data matrix at time i
    (training observations by features).

    Parameters
    ----------
    emp_cov : ndarray, shape (n_times, n_features, n_features)
        Empirical covariance of data, one matrix per time point.
    alpha : float, optional
        Regularisation parameter.
    rho : float, optional
        Augmented Lagrangian parameter.
    kernel : ndarray, shape (n_times, n_times), optional
        Temporal kernel weighting the Psi penalties; defaults to identity.
    max_iter : int, optional
        Maximum number of iterations.
    tol : float, optional
        Absolute tolerance for convergence.
    rtol : float, optional
        Relative tolerance for convergence.
    return_history : bool, optional
        Return the history of computed values.
    init : {'empirical', 'zeros', ndarray}, default 'empirical'
        How to initialise the inverse covariance matrix. The default is to
        invert the empirical covariance.

    Returns
    -------
    Z : numpy.array, 3-dimensional (T x d x d)
        Solution to the problem for each time t=1...T .
    history : list
        If return_history, then also a structure that contains the
        objective value, the primal and dual residual norms, and tolerances
        for the primal and dual residual norms at each iteration.

    """
    psi, prox_psi, psi_node_penalty = check_norm_prox(psi)

    n_times, _, n_features = emp_cov.shape

    if kernel is None:
        kernel = np.eye(n_times)

    Z_0 = init_precision(emp_cov, mode=init)
    U_0 = np.zeros_like(Z_0)
    Z_0_old = np.zeros_like(Z_0)

    Z_M, Z_M_old = {}, {}
    U_M = {}
    for m in range(1, n_times):
        # all possible Markovian jumps
        Z_L = Z_0.copy()[:-m]
        Z_R = Z_0.copy()[m:]
        Z_M[m] = (Z_L, Z_R)

        U_L = np.zeros_like(Z_L)
        U_R = np.zeros_like(Z_R)
        U_M[m] = (U_L, U_R)

        Z_L_old = np.zeros_like(Z_L)
        Z_R_old = np.zeros_like(Z_R)
        Z_M_old[m] = (Z_L_old, Z_R_old)

    if n_samples is None:
        n_samples = np.ones(n_times)

    checks = [
        convergence(obj=objective(n_samples, emp_cov, Z_0, Z_0, Z_M,
                                  alpha, kernel, psi))
    ]
    for iteration_ in range(max_iter):
        # update K
        A = Z_0 - U_0
        for m in range(1, n_times):
            A[:-m] += Z_M[m][0] - U_M[m][0]
            A[m:] += Z_M[m][1] - U_M[m][1]
        A /= n_times
        # soft_thresholding_ = partial(soft_thresholding, lamda=alpha / rho)
        # K = np.array(map(soft_thresholding_, A))
        A += A.transpose(0, 2, 1)
        A /= 2.0

        A *= -rho * n_times / n_samples[:, None, None]
        A += emp_cov

        K = np.array([
            prox_logdet(a, lamda=ni / (rho * n_times))
            for a, ni in zip(A, n_samples)
        ])

        # update Z_0
        A = K + U_0
        A += A.transpose(0, 2, 1)
        A /= 2.0
        Z_0 = soft_thresholding(A, lamda=alpha / rho)

        # update residuals
        U_0 += K - Z_0

        # other Zs
        for m in range(1, n_times):
            U_L, U_R = U_M[m]
            A_L = K[:-m] + U_L
            A_R = K[m:] + U_R
            if not psi_node_penalty:
                prox_e = prox_psi(
                    A_R - A_L,
                    lamda=2.0 * np.diag(kernel, m)[:, None, None] / rho)
                Z_L = 0.5 * (A_L + A_R - prox_e)
                Z_R = 0.5 * (A_L + A_R + prox_e)
            else:
                Z_L, Z_R = prox_psi(
                    np.concatenate((A_L, A_R), axis=1),
                    lamda=0.5 * np.diag(kernel, m)[:, None, None] / rho,
                    rho=rho, tol=tol, rtol=rtol, max_iter=max_iter,
                )
            Z_M[m] = (Z_L, Z_R)

            # update other residuals
            U_L += K[:-m] - Z_L
            U_R += K[m:] - Z_R

        # diagnostics, reporting, termination checks
        rnorm = np.sqrt(
            squared_norm(K - Z_0) + sum(
                squared_norm(K[:-m] - Z_M[m][0]) +
                squared_norm(K[m:] - Z_M[m][1])
                for m in range(1, n_times)))

        snorm = rho * np.sqrt(
            squared_norm(Z_0 - Z_0_old) + sum(
                squared_norm(Z_M[m][0] - Z_M_old[m][0]) +
                squared_norm(Z_M[m][1] - Z_M_old[m][1])
                for m in range(1, n_times)))

        obj = objective(n_samples, emp_cov, Z_0, K, Z_M, alpha, kernel, psi) \
            if compute_objective else np.nan

        check = convergence(
            obj=obj,
            rnorm=rnorm,
            snorm=snorm,
            e_pri=n_features * n_times * tol + rtol * max(
                np.sqrt(
                    squared_norm(Z_0) + sum(
                        squared_norm(Z_M[m][0]) + squared_norm(Z_M[m][1])
                        for m in range(1, n_times))),
                np.sqrt(
                    squared_norm(K) + sum(
                        squared_norm(K[:-m]) + squared_norm(K[m:])
                        for m in range(1, n_times))),
            ),
            e_dual=n_features * n_times * tol + rtol * rho * np.sqrt(
                squared_norm(U_0) + sum(
                    squared_norm(U_M[m][0]) + squared_norm(U_M[m][1])
                    for m in range(1, n_times))),
        )

        Z_0_old = Z_0.copy()
        for m in range(1, n_times):
            Z_M_old[m] = (Z_M[m][0].copy(), Z_M[m][1].copy())

        if verbose:
            print("obj: %.4f, rnorm: %.4f, snorm: %.4f, "
                  "eps_pri: %.4f, eps_dual: %.4f" % check[:5])

        checks.append(check)
        if stop_at is not None:
            if abs(check.obj - stop_at) / abs(stop_at) < stop_when:
                break
        if check.rnorm <= check.e_pri and check.snorm <= check.e_dual:
            break
        rho_new = update_rho(rho, rnorm, snorm, iteration=iteration_,
                             **(update_rho_options or {}))
        # scaled dual variables should also be rescaled
        U_0 *= rho / rho_new
        for m in range(1, n_times):
            U_L, U_R = U_M[m]
            U_L *= rho / rho_new
            U_R *= rho / rho_new
        rho = rho_new
    else:
        warnings.warn("Objective did not converge.")

    covariance_ = np.array([linalg.pinvh(x) for x in Z_0])
    return_list = [Z_0, covariance_]
    if return_history:
        return_list.append(checks)
    if return_n_iter:
        return_list.append(iteration_ + 1)
    return return_list
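
# Example (sketch): kernel-weighted time-varying graphical lasso. A simple
# exponential kernel ties nearby time points; the module-level ``objective``
# and ``init_precision`` helpers are assumed, and sizes are illustrative.
def _example_kernel_time_graphical_lasso():
    rng = np.random.RandomState(0)
    T, d = 4, 6
    emp_cov = np.array([np.cov(rng.randn(50, d), rowvar=False)
                        for _ in range(T)])
    idx = np.arange(T)
    kernel = np.exp(-np.abs(idx[:, None] - idx[None, :]))  # temporal coupling
    Z, covariance_, n_iter = kernel_time_graphical_lasso(
        emp_cov, alpha=0.1, kernel=kernel, max_iter=200,
        compute_objective=False)
    return Z, n_iter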
def time_graph_lasso(data_list, lamda=1, beta=1, max_iter=1000,
                     verbose=False, tol=1e-4, rtol=1e-2,
                     return_history=False):
    """Time-varying graphical lasso solver.

    Solves the following problem via ADMM:
        minimize trace(S*X) - log det X + lambda*||X||_1

    where S is the empirical covariance of the data matrix D
    (training observations by features).

    Parameters
    ----------
    data_list : list of 2-dimensional matrices.
        Input matrices.
    lamda : float, optional
        Regularisation parameter.
    max_iter : int, optional
        Maximum number of iterations.
    tol : float, optional
        Absolute tolerance for convergence.
    rtol : float, optional
        Relative tolerance for convergence.
    return_history : bool, optional
        Return the history of computed values.

    Returns
    -------
    K : numpy.array, 3-dimensional (T x d x d)
        Solution to the problem for each time t=1...T .
    history : list
        If return_history, then also a structure that contains the
        objective value, the primal and dual residual norms, and tolerances
        for the primal and dual residual norms at each iteration.

    """
    S = np.array([empirical_covariance(data) for data in data_list])
    n_samples = np.array([s.shape[0] for s in data_list])

    K = np.zeros_like(S)

    # divisor for consensus variables, accounting for two fewer matrices
    # at the endpoints
    divisor = np.full(S.shape[0], 3, dtype=float)
    divisor[0] -= 1
    divisor[-1] -= 1

    checks = []
    alpha = 1
    Kold = np.ones_like(S) + 5000
    for _ in range(max_iter):
        for k in range(S.shape[0]):
            K[k].flat[::K.shape[1] + 1] = 1

        alpha_old = alpha
        # choose a gamma
        gamma = .75

        # total variation
        Y = _J(K, beta, lamda, gamma, 1, S, n_samples)
        alpha = choose_alpha(alpha_old, K, S, n_samples, beta, lamda, gamma)
        alpha = 1
        K = Kold + alpha * (Y - Kold)

        check = convergence(
            obj=objective(n_samples, S, K, lamda, beta, l1_norm),
            rnorm=np.linalg.norm(K - Kold),
            snorm=20,
            # np.linalg.norm(rho * (Z_consensus - Z_consensus_old)),
            e_pri=30,
            # np.sqrt(np.prod(K.shape)) * tol + rtol * max(
            #     np.linalg.norm(K), np.linalg.norm(Z_consensus)),
            e_dual=30,
            # np.sqrt(np.prod(K.shape)) * tol + rtol * np.linalg.norm(
            #     rho * U_consensus)
        )
        if verbose:
            print("obj: %.4f, rnorm: %.4f, snorm: %.4f, "
                  "eps_pri: %.4f, eps_dual: %.4f" % check)

        checks.append(check)
        # if check.rnorm <= check.e_pri and check.snorm <= check.e_dual:
        #     break
        if check.rnorm <= tol:
            break

        Kold = K.copy()
    else:
        warnings.warn("Objective did not converge.")

    if return_history:
        return K, S, checks
    return K, S
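
# Example (sketch): the raw-data variant takes a list of data matrices rather
# than covariances. ``_J``, ``choose_alpha``, ``objective`` and ``l1_norm``
# are assumed module-level helpers of this (older) solver; sizes are
# illustrative.
def _example_time_graph_lasso():
    rng = np.random.RandomState(0)
    data_list = [rng.randn(50, 6) for _ in range(3)]
    K, S = time_graph_lasso(data_list, lamda=0.1, beta=1., max_iter=500)
    return K, S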
def _fit_time_poisson_model( X, alpha=0.01, rho=1, kernel=None, max_iter=100, verbose=False, psi="laplacian", gamma=0.1, tol=1e-4, rtol=1e-4, return_history=False, return_n_iter=True, compute_objective=True, stop_at=None, stop_when=1e-4, n_cores=-1, ): """Time-varying graphical model solver. Solves the following problem via ADMM: min sum_{i=1}^T -n_i log_likelihood(K_i, X_i) + alpha ||K_i||_{od,1} + sum_{s>t}^T k(s,t) Psi(K_s - K_t) where X is a matrix n_i x D, the observations at time i and the log-likelihood changes according to the distribution. Parameters ---------- X : ndarray, shape (n_times, n_samples, n_features) Data matrix. It has to contain two values: 0 or 1, -1 or 1. alpha, beta : float, optional Regularisation parameter. rho : float, optional Augmented Lagrangian parameter. max_iter : int, optional Maximum number of iterations. tol : float, optional Absolute tolerance for convergence. rtol : float, optional Relative tolerance for convergence. return_history : bool, optional Return the history of computed values. init : {'empirical', 'zeros', ndarray}, default 'empirical' How to initialise the inverse covariance matrix. Default is take the empirical covariance and inverting it. Returns ------- X : numpy.array, 2-dimensional Solution to the problem. history : list If return_history, then also a structure that contains the objective value, the primal and dual residual norms, and tolerances for the primal and dual residual norms at each iteration. """ psi, prox_psi, psi_node_penalty = check_norm_prox(psi) n_times, n_samples, n_features = X.shape n_samples = np.array([n_samples] * n_times) if kernel is None: kernel = np.eye(n_times) K = np.zeros((n_times, n_features, n_features)) Z_M = {} U_M = {} Z_M_old = {} for m in range(1, n_times): # all possible non markovians jumps Z_L = K.copy()[:-m] Z_R = K.copy()[m:] Z_M[m] = (Z_L, Z_R) U_L = np.zeros_like(Z_L) U_R = np.zeros_like(Z_R) U_M[m] = (U_L, U_R) Z_L_old = np.zeros_like(Z_L) Z_R_old = np.zeros_like(Z_R) Z_M_old[m] = (Z_L_old, Z_R_old) checks = [convergence(obj=objective(X, K, Z_M, alpha, kernel, psi))] for iteration_ in range(max_iter): # update K A = np.zeros_like(K) for m in range(1, n_times): A[:-m] += Z_M[m][0] - U_M[m][0] A[m:] += Z_M[m][1] - U_M[m][1] A /= n_times A += A.transpose(0, 2, 1) A /= 2.0 # K_new = np.zeros_like(K) for t in range(n_times): thetas_pred = [] for v in range(n_features): inner_verbose = max(0, verbose - 1) res = fit_each_variable(X[t, :, :], v, alpha, tol=tol, verbose=inner_verbose, A=A[t, :, :], T=n_times, rho=rho) thetas_pred.append(res[0]) K[t, :, :] = build_adjacency_matrix(thetas_pred, "union") # other Zs for m in range(1, n_times): U_L, U_R = U_M[m] A_L = K[:-m] + U_L A_R = K[m:] + U_R if not psi_node_penalty: prox_e = prox_psi(A_R - A_L, lamda=2.0 * np.diag(kernel, m)[:, None, None] / rho) Z_L = 0.5 * (A_L + A_R - prox_e) Z_R = 0.5 * (A_L + A_R + prox_e) else: Z_L, Z_R = prox_psi( np.concatenate((A_L, A_R), axis=1), lamda=0.5 * np.diag(kernel, m)[:, None, None] / rho, rho=rho, tol=tol, rtol=rtol, max_iter=max_iter, ) Z_M[m] = (Z_L, Z_R) # update other residuals U_L += K[:-m] - Z_L U_R += K[m:] - Z_R # diagnostics, reporting, termination checks rnorm = np.sqrt( sum( squared_norm(K[:-m] - Z_M[m][0]) + squared_norm(K[m:] - Z_M[m][1]) for m in range(1, n_times))) snorm = rho * np.sqrt( sum( squared_norm(Z_M[m][0] - Z_M_old[m][0]) + squared_norm(Z_M[m][1] - Z_M_old[m][1]) for m in range(1, n_times))) obj = objective(X, K, Z_M, alpha, kernel, psi) if compute_objective else np.nan check = 
convergence(
            obj=obj,
            rnorm=rnorm,
            snorm=snorm,
            e_pri=n_features * n_times * tol + rtol * max(
                np.sqrt(
                    sum(
                        squared_norm(Z_M[m][0]) + squared_norm(Z_M[m][1])
                        for m in range(1, n_times))),
                np.sqrt(
                    squared_norm(K) + sum(
                        squared_norm(K[:-m]) + squared_norm(K[m:])
                        for m in range(1, n_times))),
            ),
            e_dual=n_features * n_times * tol + rtol * rho * np.sqrt(
                sum(
                    squared_norm(U_M[m][0]) + squared_norm(U_M[m][1])
                    for m in range(1, n_times))),
        )

        for m in range(1, n_times):
            Z_M_old[m] = (Z_M[m][0].copy(), Z_M[m][1].copy())

        if verbose:
            print("obj: %.4f, rnorm: %.4f, snorm: %.4f,"
                  "eps_pri: %.4f, eps_dual: %.4f" % check[:5])

        checks.append(check)
        if stop_at is not None:
            if abs(check.obj - stop_at) / abs(stop_at) < stop_when:
                break

        if check.rnorm <= check.e_pri and check.snorm <= check.e_dual:
            break

        # this solver exposes no update_rho_options parameter: use defaults
        rho_new = update_rho(rho, rnorm, snorm, iteration=iteration_)
        # scaled dual variables should be also rescaled
        for m in range(1, n_times):
            U_L, U_R = U_M[m]
            U_L *= rho / rho_new
            U_R *= rho / rho_new
        rho = rho_new
    else:
        warnings.warn("Objective did not converge.")

    return_list = [K]
    if return_history:
        return_list.append(checks)
    if return_n_iter:
        return_list.append(iteration_ + 1)
    return return_list
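# A minimal sketch of calling `_fit_time_poisson_model` on binary data of
# shape (n_times, n_samples, n_features), as required by its docstring.
# It assumes `fit_each_variable` and `build_adjacency_matrix` are available
# in this module; shapes and hyper-parameters are illustrative only.
def _example_fit_time_poisson_model():
    import numpy as np

    rng = np.random.RandomState(0)
    X = (rng.rand(3, 40, 8) > 0.5).astype(int)  # T=3, 40 obs, 8 variables
    out = _fit_time_poisson_model(X, alpha=0.1, max_iter=50)
    K = out[0]  # (T x d x d) stack of estimated precision matrices
    return K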
def time_latent_graph_lasso_v2(emp_cov, alpha=1., tau=1., rho=1., beta=1.,
                               eta=1., max_iter=1000, verbose=False,
                               psi='laplacian', phi='laplacian',
                               assume_centered=False, tol=1e-4, rtol=1e-2,
                               return_history=False, return_n_iter=True,
                               mode=None):
    r"""Time-varying latent variable graphical lasso solver.

    Solves the following problem via ADMM:
        min sum_{i=1}^T -n_i log_likelihood(K_i-L_i) + alpha ||K_i||_{od,1}
            + tau ||L_i||_*
            + beta sum_{i=2}^T Psi(K_i - K_{i-1})
            + eta sum_{i=2}^T Phi(L_i - L_{i-1})

    where S is the empirical covariance of the data
    matrix D (training observations by features).

    Parameters
    ----------
    emp_cov : ndarray, shape (n_times, n_features, n_features)
        Empirical covariance of data for each time point.
    alpha, tau, beta, eta : float, optional
        Regularisation parameters.
    rho : float, optional
        Augmented Lagrangian parameter.
    max_iter : int, optional
        Maximum number of iterations.
    tol : float, optional
        Absolute tolerance for convergence.
    rtol : float, optional
        Relative tolerance for convergence.
    return_history : bool, optional
        Return the history of computed values.

    Returns
    -------
    K, L : numpy.array, 3-dimensional (T x d x d)
        Solution to the problem for each time t=1...T .
    history : list
        If return_history, then also a structure that contains the
        objective value, the primal and dual residual norms, and
        tolerances for the primal and dual residual norms at each
        iteration.
    """
    if psi == 'laplacian':
        prox_psi = prox_laplacian
        psi = squared_norm
    elif psi == 'l1':
        prox_psi = soft_thresholding_sign
        psi = l1_norm
    elif psi == 'l2':
        prox_psi = blockwise_soft_thresholding
        psi = np.linalg.norm
    elif psi == 'linf':
        prox_psi = prox_linf
        psi = partial(np.linalg.norm, ord=np.inf)
    else:
        raise ValueError("Value of `psi` not understood.")

    if phi == 'laplacian':
        prox_phi = prox_laplacian
        phi = squared_norm
    elif phi == 'l1':
        prox_phi = soft_thresholding_sign
        phi = l1_norm
    elif phi == 'l2':
        prox_phi = blockwise_soft_thresholding
        phi = np.linalg.norm
    elif phi == 'linf':
        prox_phi = prox_linf
        phi = partial(np.linalg.norm, ord=np.inf)
    else:
        raise ValueError("Value of `phi` not understood.")

    # one sample weight per time point (uniform weighting)
    n_samples = np.ones(emp_cov.shape[0])

    K = np.zeros_like(emp_cov)
    L = np.zeros_like(emp_cov)
    X = np.zeros_like(emp_cov)
    Z_0 = np.zeros_like(K)
    Z_1 = np.zeros_like(K)[:-1]
    Z_2 = np.zeros_like(K)[1:]
    X_0 = np.zeros_like(K)
    X_1 = np.zeros_like(K)[:-1]
    X_2 = np.zeros_like(K)[1:]
    W_0 = np.zeros_like(K)
    W_1 = np.zeros_like(K)[:-1]
    W_2 = np.zeros_like(K)[1:]

    U_0 = np.zeros_like(emp_cov)
    U_1 = np.zeros_like(emp_cov)[:-1]
    U_2 = np.zeros_like(emp_cov)[1:]
    Y_0 = np.zeros_like(emp_cov)
    Y_1 = np.zeros_like(emp_cov)[:-1]
    Y_2 = np.zeros_like(emp_cov)[1:]

    U_consensus = np.zeros_like(emp_cov)
    Y_consensus = np.zeros_like(emp_cov)
    Z_consensus = np.zeros_like(emp_cov)
    Z_consensus_old = np.zeros_like(emp_cov)
    W_consensus = np.zeros_like(emp_cov)
    W_consensus_old = np.zeros_like(emp_cov)
    R_old = np.zeros_like(emp_cov)

    # divisor for consensus variables, accounting for two less matrices
    divisor = np.full(emp_cov.shape[0], 3, dtype=float)
    divisor[0] -= 1
    divisor[-1] -= 1

    checks = []
    for iteration_ in range(max_iter):
        # update R
        A = K - L - X
        A += Z_0 - W_0 - X_0
        A[:-1] += Z_1 - W_1 - X_1
        A[1:] += Z_2 - W_2 - X_2
        A /= divisor[:, None, None] + 1
        A += A.transpose(0, 2, 1)
        A /= 2.
        A *= -rho  # / n_samples[:, None, None]
        A += emp_cov

        R = np.array([prox_logdet(a, lamda=ni / rho)
                      for a, ni in zip(A, n_samples)])

        # update K, L
        K = L + R + X + Z_0 - U_0
        K[:-1] += Z_1 - U_1
        K[1:] += Z_2 - U_2
        K /= divisor[:, None, None] + 1

        L = K - R - X + W_0 - Y_0
        L[:-1] += W_1 - Y_1
        L[1:] += W_2 - Y_2
        L /= divisor[:, None, None] + 1

        # update Z_0 (avoid shadowing the module-level soft_thresholding)
        Z_0 = soft_thresholding_sign(
            (K + U_0 + R + W_0 + X_0) / 2., lamda=alpha / rho)

        # update Z_1, Z_2
        A_1 = (K[:-1] + U_1 + R[:-1] + W_1 + X_1) / 2.
        A_2 = (K[1:] + U_2 + R[1:] + W_2 + X_2) / 2.
        prox_e = prox_psi(A_2 - A_1, lamda=2. * beta / rho)
        Z_1 = .5 * (A_2 + A_1 - prox_e)
        Z_2 = .5 * (A_2 + A_1 + prox_e)

        # update W_0
        A = (L + Y_0 - R + Z_0 - X_0) / 2.
        W_0 = np.array([prox_trace_indicator(a, lamda=tau / rho) for a in A])

        # update W_1, W_2
        A_1 = (L[:-1] + Y_1 - R[:-1] + Z_1 - X_1) / 2.
        A_2 = (L[1:] + Y_2 - R[1:] + Z_2 - X_2) / 2.
        prox_e = prox_phi(A_2 - A_1, lamda=2. * eta / rho)
        W_1 = .5 * (A_2 + A_1 - prox_e)
        W_2 = .5 * (A_2 + A_1 + prox_e)

        # update residuals
        X += R - K + L
        X_0 += (R - Z_0 + W_0)
        X_1 += (R[:-1] - Z_1 + W_1)
        X_2 += (R[1:] - Z_2 + W_2)
        U_0 += (K - Z_0)
        U_1 += (K[:-1] - Z_1)
        U_2 += (K[1:] - Z_2)
        Y_0 += (L - W_0)
        Y_1 += (L[:-1] - W_1)
        Y_2 += (L[1:] - W_2)

        # diagnostics, reporting, termination checks
        Z_consensus = Z_0.copy()
        Z_consensus[:-1] += Z_1
        Z_consensus[1:] += Z_2
        Z_consensus /= divisor[:, None, None]

        U_consensus = U_0.copy()
        U_consensus[:-1] += U_1
        U_consensus[1:] += U_2
        U_consensus /= divisor[:, None, None]

        W_consensus = W_0.copy()
        W_consensus[:-1] += W_1
        W_consensus[1:] += W_2
        W_consensus /= divisor[:, None, None]

        X_consensus = X_0.copy()
        X_consensus[:-1] += X_1
        X_consensus[1:] += X_2
        X_consensus /= divisor[:, None, None]

        Y_consensus = Y_0.copy()
        Y_consensus[:-1] += Y_1
        Y_consensus[1:] += Y_2
        Y_consensus /= divisor[:, None, None]

        check = convergence(
            obj=objective(n_samples, emp_cov, R, Z_0, Z_1, Z_2, W_0, W_1,
                          W_2, alpha, tau, beta, eta, psi, phi),
            rnorm=np.sqrt(
                squared_norm(K - Z_consensus) +
                squared_norm(L - W_consensus) +
                squared_norm(Z_consensus - W_consensus - R) +
                squared_norm(K - L - R)),
            snorm=np.sqrt(
                squared_norm(rho * (Z_consensus - Z_consensus_old)) +
                squared_norm(rho * (W_consensus - W_consensus_old)) +
                squared_norm(rho * (Z_consensus - W_consensus -
                                    (Z_consensus_old - W_consensus_old))) +
                squared_norm(rho * (R - R_old))),
            e_pri=np.sqrt(np.prod(K.shape) * 4) * tol + rtol * max(
                np.sqrt(
                    squared_norm(K) + squared_norm(L) + squared_norm(K - L) +
                    squared_norm(Z_consensus - W_consensus)),
                np.sqrt(
                    squared_norm(Z_consensus) + squared_norm(W_consensus) +
                    squared_norm(R) + squared_norm(R))),
            e_dual=np.sqrt(np.prod(K.shape) * 4) * tol + rtol * np.sqrt(
                squared_norm(rho * (U_consensus)) +
                squared_norm(rho * (Y_consensus)) +
                squared_norm(rho * (X_consensus)) + squared_norm(rho * (X))))

        Z_consensus_old = Z_consensus.copy()
        W_consensus_old = W_consensus.copy()
        R_old = R.copy()

        if verbose:
            print("obj: %.4f, rnorm: %.4f, snorm: %.4f,"
                  "eps_pri: %.4f, eps_dual: %.4f" % check)

        checks.append(check)
        if (check.rnorm <= check.e_pri and check.snorm <= check.e_dual):
            break
    else:
        warnings.warn("Objective did not converge.")

    return_list = [K, L, emp_cov]
    if return_history:
        return_list.append(checks)
    if return_n_iter:
        return_list.append(iteration_)
    return return_list
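# Sketch of a call to `time_latent_graph_lasso_v2`: the solver expects a
# stack of empirical covariances (T x d x d). The use of
# sklearn.covariance.empirical_covariance here is an assumption for the
# example; any stack of symmetric positive semidefinite matrices would do.
def _example_time_latent_graph_lasso_v2():
    import numpy as np
    from sklearn.covariance import empirical_covariance

    rng = np.random.RandomState(0)
    emp_cov = np.array([empirical_covariance(rng.randn(50, 10))
                        for _ in range(4)])
    K, L, S, n_iter = time_latent_graph_lasso_v2(
        emp_cov, alpha=0.1, tau=0.1, beta=1., eta=1., max_iter=200)
    return K, L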
def kernel_latent_time_graphical_lasso( emp_cov, alpha=0.01, tau=1.0, rho=1.0, kernel_psi=None, kernel_phi=None, max_iter=100, verbose=False, psi="laplacian", phi="laplacian", mode="admm", tol=1e-4, rtol=1e-4, assume_centered=False, n_samples=None, return_history=False, return_n_iter=True, update_rho_options=None, compute_objective=True, init="empirical", ): r"""Time-varying latent variable graphical lasso solver. Solves the following problem via ADMM: min sum_{i=1}^T -n_i log_likelihood(K_i-L_i) + alpha ||K_i||_{od,1} + tau ||L_i||_* + sum_{s>t}^T k_psi(s,t) Psi(K_s - K_t) + sum_{s>t}^T k_phi(s,t)(L_s - L_t) where S is the empirical covariance of the data matrix D (training observations by features). Parameters ---------- emp_cov : ndarray, shape (n_features, n_features) Empirical covariance of data. alpha, tau, beta, eta : float, optional Regularisation parameters. rho : float, optional Augmented Lagrangian parameter. max_iter : int, optional Maximum number of iterations. tol : float, optional Absolute tolerance for convergence. rtol : float, optional Relative tolerance for convergence. return_history : bool, optional Return the history of computed values. Returns ------- K, L : numpy.array, 3-dimensional (T x d x d) Solution to the problem for each time t=1...T . history : list If return_history, then also a structure that contains the objective value, the primal and dual residual norms, and tolerances for the primal and dual residual norms at each iteration. """ psi, prox_psi, psi_node_penalty = check_norm_prox(psi) phi, prox_phi, phi_node_penalty = check_norm_prox(phi) n_times, _, n_features = emp_cov.shape if kernel_psi is None: kernel_psi = np.eye(n_times) if kernel_phi is None: kernel_phi = np.eye(n_times) Z_0 = init_precision(emp_cov, mode=init) W_0 = np.zeros_like(Z_0) X_0 = np.zeros_like(Z_0) R_old = np.zeros_like(Z_0) Z_M, Z_M_old = {}, {} Y_M = {} W_M, W_M_old = {}, {} U_M = {} for m in range(1, n_times): Z_L = Z_0.copy()[:-m] Z_R = Z_0.copy()[m:] Z_M[m] = (Z_L, Z_R) W_L = np.zeros_like(Z_L) W_R = np.zeros_like(Z_R) W_M[m] = (W_L, W_R) Y_L = np.zeros_like(Z_L) Y_R = np.zeros_like(Z_R) Y_M[m] = (Y_L, Y_R) U_L = np.zeros_like(W_L) U_R = np.zeros_like(W_R) U_M[m] = (U_L, U_R) Z_L_old = np.zeros_like(Z_L) Z_R_old = np.zeros_like(Z_R) Z_M_old[m] = (Z_L_old, Z_R_old) W_L_old = np.zeros_like(W_L) W_R_old = np.zeros_like(W_R) W_M_old[m] = (W_L_old, W_R_old) if n_samples is None: n_samples = np.ones(n_times) checks = [] for iteration_ in range(max_iter): # update R A = Z_0 - W_0 - X_0 A += A.transpose(0, 2, 1) A /= 2.0 A *= -rho / n_samples[:, None, None] A += emp_cov # A = emp_cov / rho - A R = np.array( [prox_logdet(a, lamda=ni / rho) for a, ni in zip(A, n_samples)]) # update Z_0 A = R + W_0 + X_0 for m in range(1, n_times): A[:-m] += Z_M[m][0] - Y_M[m][0] A[m:] += Z_M[m][1] - Y_M[m][1] A /= n_times Z_0 = soft_thresholding(A, lamda=alpha / (rho * n_times)) # update W_0 A = Z_0 - R - X_0 for m in range(1, n_times): A[:-m] += W_M[m][0] - U_M[m][0] A[m:] += W_M[m][1] - U_M[m][1] A /= n_times A += A.transpose(0, 2, 1) A /= 2.0 W_0 = np.array( [prox_trace_indicator(a, lamda=tau / (rho * n_times)) for a in A]) # update residuals X_0 += R - Z_0 + W_0 for m in range(1, n_times): # other Zs Y_L, Y_R = Y_M[m] A_L = Z_0[:-m] + Y_L A_R = Z_0[m:] + Y_R if not psi_node_penalty: prox_e = prox_psi(A_R - A_L, lamda=2.0 * np.diag(kernel_psi, m)[:, None, None] / rho) Z_L = 0.5 * (A_L + A_R - prox_e) Z_R = 0.5 * (A_L + A_R + prox_e) else: Z_L, Z_R = prox_psi( np.concatenate((A_L, A_R), axis=1), 
lamda=0.5 * np.diag(kernel_psi, m)[:, None, None] / rho, rho=rho, tol=tol, rtol=rtol, max_iter=max_iter, ) Z_M[m] = (Z_L, Z_R) # update other residuals Y_L += Z_0[:-m] - Z_L Y_R += Z_0[m:] - Z_R # other Ws U_L, U_R = U_M[m] A_L = W_0[:-m] + U_L A_R = W_0[m:] + U_R if not phi_node_penalty: prox_e = prox_phi(A_R - A_L, lamda=2.0 * np.diag(kernel_phi, m)[:, None, None] / rho) W_L = 0.5 * (A_L + A_R - prox_e) W_R = 0.5 * (A_L + A_R + prox_e) else: W_L, W_R = prox_phi( np.concatenate((A_L, A_R), axis=1), lamda=0.5 * np.diag(kernel_phi, m)[:, None, None] / rho, rho=rho, tol=tol, rtol=rtol, max_iter=max_iter, ) W_M[m] = (W_L, W_R) # update other residuals U_L += W_0[:-m] - W_L U_R += W_0[m:] - W_R # diagnostics, reporting, termination checks rnorm = np.sqrt( squared_norm(R - Z_0 + W_0) + sum( squared_norm(Z_0[:-m] - Z_M[m][0]) + squared_norm(Z_0[m:] - Z_M[m][1]) + squared_norm(W_0[:-m] - W_M[m][0]) + squared_norm(W_0[m:] - W_M[m][1]) for m in range(1, n_times))) snorm = rho * np.sqrt( squared_norm(R - R_old) + sum( squared_norm(Z_M[m][0] - Z_M_old[m][0]) + squared_norm(Z_M[m][1] - Z_M_old[m][1]) + squared_norm(W_M[m][0] - W_M_old[m][0]) + squared_norm(W_M[m][1] - W_M_old[m][1]) for m in range(1, n_times))) obj = (objective(emp_cov, n_samples, R, Z_0, Z_M, W_0, W_M, alpha, tau, kernel_psi, kernel_phi, psi, phi) if compute_objective else np.nan) check = convergence( obj=obj, rnorm=rnorm, snorm=snorm, e_pri=n_features * np.sqrt(n_times * (2 * n_times - 1)) * tol + rtol * max( np.sqrt( squared_norm(R) + sum( squared_norm(Z_M[m][0]) + squared_norm(Z_M[m][1]) + squared_norm(W_M[m][0]) + squared_norm(W_M[m][1]) for m in range(1, n_times))), np.sqrt( squared_norm(Z_0 - W_0) + sum( squared_norm(Z_0[:-m]) + squared_norm(Z_0[m:]) + squared_norm(W_0[:-m]) + squared_norm(W_0[m:]) for m in range(1, n_times))), ), e_dual=n_features * np.sqrt(n_times * (2 * n_times - 1)) * tol + rtol * rho * np.sqrt( squared_norm(X_0) + sum( squared_norm(Y_M[m][0]) + squared_norm(Y_M[m][1]) + squared_norm(U_M[m][0]) + squared_norm(U_M[m][1]) for m in range(1, n_times))), ) R_old = R.copy() for m in range(1, n_times): Z_M_old[m] = (Z_M[m][0].copy(), Z_M[m][1].copy()) W_M_old[m] = (W_M[m][0].copy(), W_M[m][1].copy()) if verbose: print("obj: %.4f, rnorm: %.4f, snorm: %.4f," "eps_pri: %.4f, eps_dual: %.4f" % check[:5]) checks.append(check) if check.rnorm <= check.e_pri and check.snorm <= check.e_dual: break rho_new = update_rho(rho, rnorm, snorm, iteration=iteration_, **(update_rho_options or {})) # scaled dual variables should be also rescaled X_0 *= rho / rho_new for m in range(1, n_times): Y_L, Y_R = Y_M[m] Y_L *= rho / rho_new Y_R *= rho / rho_new U_L, U_R = U_M[m] U_L *= rho / rho_new U_R *= rho / rho_new rho = rho_new else: warnings.warn("Objective did not converge.") covariance_ = np.array([linalg.pinvh(x) for x in Z_0]) return_list = [Z_0, W_0, covariance_] if return_history: return_list.append(checks) if return_n_iter: return_list.append(iteration_) return return_list
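# Sketch of `kernel_latent_time_graphical_lasso` with explicit temporal
# kernels for both the precision (psi) and latent (phi) consistency terms.
# The RBF kernel construction mirrors _example_temporal_kernel above and
# is an illustrative choice, not a requirement.
def _example_kernel_latent_time_graphical_lasso():
    import numpy as np
    from sklearn.covariance import empirical_covariance

    rng = np.random.RandomState(0)
    n_times = 4
    emp_cov = np.array([empirical_covariance(rng.randn(50, 10))
                        for _ in range(n_times)])
    t = np.arange(n_times)
    kernel = np.exp(-(t[:, None] - t[None, :]) ** 2)
    out = kernel_latent_time_graphical_lasso(
        emp_cov, alpha=0.1, tau=0.1, kernel_psi=kernel, kernel_phi=kernel)
    Z_0, W_0, covariance_ = out[0], out[1], out[2]
    return Z_0, W_0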
def time_graphical_lasso( emp_cov, alpha=0.01, rho=1, beta=1, max_iter=100, n_samples=None, verbose=False, psi="laplacian", tol=1e-4, rtol=1e-4, return_history=False, return_n_iter=True, mode="admm", compute_objective=True, stop_at=None, stop_when=1e-4, update_rho_options=None, init="empirical", ): """Time-varying graphical lasso solver. Solves the following problem via ADMM: min sum_{i=1}^T -n_i log_likelihood(S_i, K_i) + alpha*||K_i||_{od,1} + beta sum_{i=2}^T Psi(K_i - K_{i-1}) where S_i = (1/n_i) X_i^T \times X_i is the empirical covariance of data matrix X (training observations by features). Parameters ---------- emp_cov : ndarray, shape (n_features, n_features) Empirical covariance of data. alpha, beta : float, optional Regularisation parameter. rho : float, optional Augmented Lagrangian parameter. max_iter : int, optional Maximum number of iterations. n_samples : ndarray Number of samples available for each time point. tol : float, optional Absolute tolerance for convergence. rtol : float, optional Relative tolerance for convergence. return_history : bool, optional Return the history of computed values. return_n_iter : bool, optional Return the number of iteration before convergence. verbose : bool, default False Print info at each iteration. update_rho_options : dict, optional Arguments for the rho update. See regain.update_rules.update_rho function for more information. compute_objective : bool, default True Choose to compute the objective value. init : {'empirical', 'zero', ndarray} Choose how to initialize the precision matrix, with the inverse empirical covariance, zero matrix or precomputed. Returns ------- K : numpy.array, 3-dimensional (T x d x d) Solution to the problem for each time t=1...T . history : list If return_history, then also a structure that contains the objective value, the primal and dual residual norms, and tolerances for the primal and dual residual norms at each iteration. 
""" psi, prox_psi, psi_node_penalty = check_norm_prox(psi) Z_0 = init_precision(emp_cov, mode=init) Z_1 = Z_0.copy()[:-1] # np.zeros_like(emp_cov)[:-1] Z_2 = Z_0.copy()[1:] # np.zeros_like(emp_cov)[1:] U_0 = np.zeros_like(Z_0) U_1 = np.zeros_like(Z_1) U_2 = np.zeros_like(Z_2) Z_0_old = np.zeros_like(Z_0) Z_1_old = np.zeros_like(Z_1) Z_2_old = np.zeros_like(Z_2) # divisor for consensus variables, accounting for two less matrices divisor = np.full(emp_cov.shape[0], 3, dtype=float) divisor[0] -= 1 divisor[-1] -= 1 if n_samples is None: n_samples = np.ones(emp_cov.shape[0]) checks = [convergence(obj=objective(n_samples, emp_cov, Z_0, Z_0, Z_1, Z_2, alpha, beta, psi))] for iteration_ in range(max_iter): # update K A = Z_0 - U_0 A[:-1] += Z_1 - U_1 A[1:] += Z_2 - U_2 A /= divisor[:, None, None] # soft_thresholding_ = partial(soft_thresholding, lamda=alpha / rho) # K = np.array(map(soft_thresholding_, A)) A += A.transpose(0, 2, 1) A /= 2.0 A *= -rho * divisor[:, None, None] / n_samples[:, None, None] A += emp_cov K = np.array([prox_logdet(a, lamda=ni / (rho * div)) for a, div, ni in zip(A, divisor, n_samples)]) # update Z_0 A = K + U_0 A += A.transpose(0, 2, 1) A /= 2.0 Z_0 = soft_thresholding(A, lamda=alpha / rho) # other Zs A_1 = K[:-1] + U_1 A_2 = K[1:] + U_2 if not psi_node_penalty: prox_e = prox_psi(A_2 - A_1, lamda=2.0 * beta / rho) Z_1 = 0.5 * (A_1 + A_2 - prox_e) Z_2 = 0.5 * (A_1 + A_2 + prox_e) else: Z_1, Z_2 = prox_psi( np.concatenate((A_1, A_2), axis=1), lamda=0.5 * beta / rho, rho=rho, tol=tol, rtol=rtol, max_iter=max_iter, ) # update residuals U_0 += K - Z_0 U_1 += K[:-1] - Z_1 U_2 += K[1:] - Z_2 # diagnostics, reporting, termination checks rnorm = np.sqrt(squared_norm(K - Z_0) + squared_norm(K[:-1] - Z_1) + squared_norm(K[1:] - Z_2)) snorm = rho * np.sqrt(squared_norm(Z_0 - Z_0_old) + squared_norm(Z_1 - Z_1_old) + squared_norm(Z_2 - Z_2_old)) obj = objective(n_samples, emp_cov, Z_0, K, Z_1, Z_2, alpha, beta, psi) if compute_objective else np.nan # if np.isinf(obj): # Z_0 = Z_0_old # break check = convergence( obj=obj, rnorm=rnorm, snorm=snorm, e_pri=np.sqrt(K.size + 2 * Z_1.size) * tol + rtol * max( np.sqrt(squared_norm(Z_0) + squared_norm(Z_1) + squared_norm(Z_2)), np.sqrt(squared_norm(K) + squared_norm(K[:-1]) + squared_norm(K[1:])), ), e_dual=np.sqrt(K.size + 2 * Z_1.size) * tol + rtol * rho * np.sqrt(squared_norm(U_0) + squared_norm(U_1) + squared_norm(U_2)), # precision=Z_0.copy() ) Z_0_old = Z_0.copy() Z_1_old = Z_1.copy() Z_2_old = Z_2.copy() if verbose: print("obj: %.4f, rnorm: %.4f, snorm: %.4f," "eps_pri: %.4f, eps_dual: %.4f" % check[:5]) checks.append(check) if stop_at is not None: if abs(check.obj - stop_at) / abs(stop_at) < stop_when: break if check.rnorm <= check.e_pri and check.snorm <= check.e_dual: break rho_new = update_rho(rho, rnorm, snorm, iteration=iteration_, **(update_rho_options or {})) # scaled dual variables should be also rescaled U_0 *= rho / rho_new U_1 *= rho / rho_new U_2 *= rho / rho_new rho = rho_new # assert is_pos_def(Z_0) else: warnings.warn("Objective did not converge.") covariance_ = np.array([linalg.pinvh(x) for x in Z_0]) return_list = [Z_0, covariance_] if return_history: return_list.append(checks) if return_n_iter: return_list.append(iteration_ + 1) return return_list
def enet_kernel_learning_admm(
        K, y, lamda=0.01, beta=0.01, rho=1., max_iter=100, verbose=0,
        rtol=1e-4, tol=1e-4, return_n_iter=True, update_rho_options=None):
    """Elastic Net kernel learning.

    Solves the following problem via ADMM:
        min sum_{i=1}^p 1/2 ||alpha_i * w * K_i - y_i||^2 + lamda ||w||_1
            + beta ||w||_2^2
    """
    n_patients = len(K)
    n_kernels = len(K[0])
    coef = np.ones(n_kernels)

    u_1 = np.zeros(n_kernels)
    u_2 = np.zeros(n_kernels)
    w_1 = np.zeros(n_kernels)
    w_2 = np.zeros(n_kernels)

    w_1_old = w_1.copy()
    w_2_old = w_2.copy()

    checks = []
    for iteration_ in range(max_iter):
        # update alpha
        # solve (AtA + 2I)^-1 (Aty) with A = wK
        A = [K[j].T.dot(coef) for j in range(n_patients)]
        KK = [A[j].dot(A[j].T) for j in range(n_patients)]
        yy = [y[j].dot(A[j]) for j in range(n_patients)]

        alpha = [_solve_cholesky_kernel(KK[j], yy[j][..., None], 2).ravel()
                 for j in range(n_patients)]

        w_1 = soft_thresholding(coef + u_1, lamda / rho)
        w_2 = prox_laplacian(coef + u_2, beta / rho)

        # update coef (equivalent to alpha_dot_K)
        # solve (sum(AtA) + 2*rho I)^-1 (sum(Aty) + rho(w1+w2-u1-u2))
        # with A = K * alpha
        A = [K[j].dot(alpha[j]) for j in range(n_patients)]
        KK = sum(A[j].dot(A[j].T) for j in range(n_patients))
        yy = sum(y[j].dot(A[j].T) for j in range(n_patients))
        yy += rho * (w_1 + w_2 - u_1 - u_2)
        coef = _solve_cholesky_kernel(KK, yy[..., None], 2 * rho).ravel()

        # update residuals
        u_1 += coef - w_1
        u_2 += coef - w_2

        # diagnostics, reporting, termination checks
        rnorm = np.sqrt(squared_norm(coef - w_1) + squared_norm(coef - w_2))
        snorm = rho * np.sqrt(
            squared_norm(w_1 - w_1_old) + squared_norm(w_2 - w_2_old))

        obj = objective_admm(K, y, alpha, lamda, beta, coef, w_1, w_2)

        check = convergence(
            obj=obj, rnorm=rnorm, snorm=snorm,
            e_pri=np.sqrt(2 * coef.size) * tol + rtol * max(
                np.sqrt(squared_norm(coef) + squared_norm(coef)),
                np.sqrt(squared_norm(w_1) + squared_norm(w_2))),
            e_dual=np.sqrt(2 * coef.size) * tol + rtol * rho * (
                np.sqrt(squared_norm(u_1) + squared_norm(u_2))))

        w_1_old = w_1.copy()
        w_2_old = w_2.copy()

        if verbose:
            print("obj: %.4f, rnorm: %.4f, snorm: %.4f,"
                  "eps_pri: %.4f, eps_dual: %.4f" % check)

        checks.append(check)
        if check.rnorm <= check.e_pri and check.snorm <= check.e_dual \
                and iteration_ > 1:
            break

        rho_new = update_rho(rho, rnorm, snorm, iteration=iteration_,
                             **(update_rho_options or {}))
        # scaled dual variables should be also rescaled
        u_1 *= rho / rho_new
        u_2 *= rho / rho_new
        rho = rho_new
    else:
        warnings.warn("Objective did not converge.")

    return_list = [alpha, coef]
    if return_n_iter:
        return_list.append(iteration_)
    return return_list
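# Sketch of `enet_kernel_learning_admm` input shapes: one stack of kernels
# per patient, K[j] of shape (n_kernels, n_j, n_j), with targets y[j] of
# length n_j. Random symmetrised kernels are used purely for illustration.
def _example_enet_kernel_learning_admm():
    import numpy as np

    rng = np.random.RandomState(0)
    n_kernels, n = 3, 20
    K, y = [], []
    for _ in range(5):  # 5 patients
        k = rng.randn(n_kernels, n, n)
        K.append((k + k.transpose(0, 2, 1)) / 2.)  # symmetrise each kernel
        y.append(rng.randn(n))
    alpha, coef, n_iter = enet_kernel_learning_admm(
        K, y, lamda=0.01, beta=0.01, max_iter=200)
    return coef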
def latent_time_matrix_decomposition(emp_cov, alpha=0.01, tau=1., rho=1., beta=1., eta=1., max_iter=100, verbose=False, psi='laplacian', phi='laplacian', mode='admm', tol=1e-4, rtol=1e-4, assume_centered=False, return_history=False, return_n_iter=True, update_rho_options=None, compute_objective=True): r"""Latent variable time-varying matrix decomposition solver. Solves the following problem via ADMM: min sum_{i=1}^T || S_i-(K_i-L_i)||^2 + alpha ||K_i||_{od,1} + tau ||L_i||_* + beta sum_{i=2}^T Psi(K_i - K_{i-1}) + eta sum_{i=2}^T Phi(L_i - L_{i-1}) where S is the matrix to decompose. Parameters ---------- emp_cov : ndarray, shape (n_features, n_features) Matrix to decompose. alpha, tau, beta, eta : float, optional Regularisation parameters. rho : float, optional Augmented Lagrangian parameter. max_iter : int, optional Maximum number of iterations. tol : float, optional Absolute tolerance for convergence. rtol : float, optional Relative tolerance for convergence. return_history : bool, optional Return the history of computed values. Returns ------- K, L : numpy.array, 3-dimensional (T x d x d) Solution to the problem for each time t=1...T . history : list If return_history, then also a structure that contains the objective value, the primal and dual residual norms, and tolerances for the primal and dual residual norms at each iteration. """ psi, prox_psi, psi_node_penalty = check_norm_prox(psi) phi, prox_phi, phi_node_penalty = check_norm_prox(phi) Z_0 = np.zeros_like(emp_cov) Z_1 = np.zeros_like(Z_0)[:-1] Z_2 = np.zeros_like(Z_0)[1:] W_0 = np.zeros_like(Z_0) W_1 = np.zeros_like(Z_1) W_2 = np.zeros_like(Z_2) X_0 = np.zeros_like(Z_0) X_1 = np.zeros_like(Z_1) X_2 = np.zeros_like(Z_2) U_1 = np.zeros_like(W_1) U_2 = np.zeros_like(W_2) R_old = np.zeros_like(Z_0) Z_1_old = np.zeros_like(Z_1) Z_2_old = np.zeros_like(Z_2) W_1_old = np.zeros_like(W_1) W_2_old = np.zeros_like(W_2) # divisor for consensus variables, accounting for two less matrices divisor = np.full(emp_cov.shape[0], 3, dtype=float) divisor[0] -= 1 divisor[-1] -= 1 checks = [] for iteration_ in range(max_iter): # update R A = Z_0 - W_0 - X_0 R = (rho * A + 2 * emp_cov) / (2 + rho) # update Z_0 A = R + W_0 + X_0 A[:-1] += Z_1 - X_1 A[1:] += Z_2 - X_2 A /= divisor[:, None, None] # soft_thresholding_ = partial(soft_thresholding, lamda=alpha / rho) # Z_0 = np.array(map(soft_thresholding_, A)) Z_0 = soft_thresholding(A, lamda=alpha / (rho * divisor[:, None, None])) # update Z_1, Z_2 A_1 = Z_0[:-1] + X_1 A_2 = Z_0[1:] + X_2 if not psi_node_penalty: prox_e = prox_psi(A_2 - A_1, lamda=2. * beta / rho) Z_1 = .5 * (A_1 + A_2 - prox_e) Z_2 = .5 * (A_1 + A_2 + prox_e) else: Z_1, Z_2 = prox_psi(np.concatenate((A_1, A_2), axis=1), lamda=.5 * beta / rho, rho=rho, tol=tol, rtol=rtol, max_iter=max_iter) # update W_0 A = Z_0 - R - X_0 A[:-1] += W_1 - U_1 A[1:] += W_2 - U_2 A /= divisor[:, None, None] A += A.transpose(0, 2, 1) A /= 2. W_0 = np.array([ prox_trace_indicator(a, lamda=tau / (rho * div)) for a, div in zip(A, divisor) ]) # update W_1, W_2 A_1 = W_0[:-1] + U_1 A_2 = W_0[1:] + U_2 if not phi_node_penalty: prox_e = prox_phi(A_2 - A_1, lamda=2. 
* eta / rho) W_1 = .5 * (A_1 + A_2 - prox_e) W_2 = .5 * (A_1 + A_2 + prox_e) else: W_1, W_2 = prox_phi(np.concatenate((A_1, A_2), axis=1), lamda=.5 * eta / rho, rho=rho, tol=tol, rtol=rtol, max_iter=max_iter) # update residuals X_0 += R - Z_0 + W_0 X_1 += Z_0[:-1] - Z_1 X_2 += Z_0[1:] - Z_2 U_1 += W_0[:-1] - W_1 U_2 += W_0[1:] - W_2 # diagnostics, reporting, termination checks rnorm = np.sqrt( squared_norm(R - Z_0 + W_0) + squared_norm(Z_0[:-1] - Z_1) + squared_norm(Z_0[1:] - Z_2) + squared_norm(W_0[:-1] - W_1) + squared_norm(W_0[1:] - W_2)) snorm = rho * np.sqrt( squared_norm(R - R_old) + squared_norm(Z_1 - Z_1_old) + squared_norm(Z_2 - Z_2_old) + squared_norm(W_1 - W_1_old) + squared_norm(W_2 - W_2_old)) obj = objective(emp_cov, R, Z_0, Z_1, Z_2, W_0, W_1, W_2, alpha, tau, beta, eta, psi, phi) \ if compute_objective else np.nan check = convergence( obj=obj, rnorm=rnorm, snorm=snorm, e_pri=np.sqrt(R.size + 4 * Z_1.size) * tol + rtol * max( np.sqrt( squared_norm(R) + squared_norm(Z_1) + squared_norm(Z_2) + squared_norm(W_1) + squared_norm(W_2)), np.sqrt( squared_norm(Z_0 - W_0) + squared_norm(Z_0[:-1]) + squared_norm(Z_0[1:]) + squared_norm(W_0[:-1]) + squared_norm(W_0[1:]))), e_dual=np.sqrt(R.size + 4 * Z_1.size) * tol + rtol * rho * (np.sqrt( squared_norm(X_0) + squared_norm(X_1) + squared_norm(X_2) + squared_norm(U_1) + squared_norm(U_2)))) R_old = R.copy() Z_1_old = Z_1.copy() Z_2_old = Z_2.copy() W_1_old = W_1.copy() W_2_old = W_2.copy() if verbose: print("obj: %.4f, rnorm: %.4f, snorm: %.4f," "eps_pri: %.4f, eps_dual: %.4f" % check) checks.append(check) if check.rnorm <= check.e_pri and check.snorm <= check.e_dual: break rho_new = update_rho(rho, rnorm, snorm, iteration=iteration_, **(update_rho_options or {})) # scaled dual variables should be also rescaled X_0 *= rho / rho_new X_1 *= rho / rho_new X_2 *= rho / rho_new U_1 *= rho / rho_new U_2 *= rho / rho_new rho = rho_new else: warnings.warn("Objective did not converge.") return_list = [Z_0, W_0] if return_history: return_list.append(checks) if return_n_iter: return_list.append(iteration_) return return_list
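# Sketch of `latent_time_matrix_decomposition` on a stack of symmetric
# matrices to decompose into a sparse component Z_0 and a low-rank
# component W_0; the input here is random and purely illustrative.
def _example_latent_time_matrix_decomposition():
    import numpy as np

    rng = np.random.RandomState(0)
    A = rng.randn(4, 10, 10)
    emp_cov = (A + A.transpose(0, 2, 1)) / 2.  # T=4 symmetric 10x10 matrices
    Z_0, W_0, n_iter = latent_time_matrix_decomposition(
        emp_cov, alpha=0.1, tau=0.1, beta=1., eta=1.)
    return Z_0, W_0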
def tgl_forward_backward( emp_cov, alpha=0.01, beta=1., max_iter=100, n_samples=None, verbose=False, tol=1e-4, delta=1e-4, gamma=1., lamda=1., eps=0.5, debug=False, return_history=False, return_n_iter=True, choose='gamma', lamda_criterion='b', time_norm=1, compute_objective=True, return_n_linesearch=False, vareps=1e-5, stop_at=None, stop_when=1e-4, laplacian_penalty=False, init='empirical'): """Time-varying graphical lasso solver with forward-backward splitting. Solves the following problem via FBS: min sum_{i=1}^T -n_i log_likelihood(S_i, K_i) + alpha*||K_i||_{od,1} + beta sum_{i=2}^T Psi(K_i - K_{i-1}) where S_i = (1/n_i) X_i^T \times X_i is the empirical covariance of data matrix X (training observations by features). Parameters ---------- emp_cov : ndarray, shape (n_times, n_features, n_features) Empirical covariance of data. alpha, beta : float, optional Regularisation parameters. max_iter : int, optional Maximum number of iterations. n_samples : ndarray Number of samples available for each time point. verbose : bool, default False Print info at each iteration. tol : float, optional Absolute tolerance for convergence. delta, gamma, lamda, eps : float, optional FBS parameters. debug : bool, default False Run in debug mode. return_history : bool, optional Return the history of computed values. return_n_iter : bool, optional Return the number of iteration before convergence. choose : ('gamma', 'lambda', 'fixed', 'both) Search iteratively gamma / lambda / none / both. lamda_criterion : ('a', 'b', 'c') Criterion to choose lamda. See ref for details. time_norm : float, optional Choose the temporal norm between points. compute_objective : bool, default True Choose to compute the objective value. return_n_linesearch : bool, optional Return the number of line-search iterations before convergence. vareps : float, optional Jitter for the loss. stop_at, stop_when : float, optional Other convergence criteria, as used in the paper. laplacian_penalty : bool, default False Use Laplacian penalty. init : {'empirical', 'zero', ndarray} Choose how to initialize the precision matrix, with the inverse empirical covariance, zero matrix or precomputed. Returns ------- K, covariance : numpy.array, 3-dimensional (T x d x d) Solution to the problem for each time t=1...T . history : list If return_history, then also a structure that contains the objective value, the primal and dual residual norms, and tolerances for the primal and dual residual norms at each iteration. """ available_choose = ('gamma', 'lamda', 'fixed', 'both') if choose not in available_choose: raise ValueError( "`choose` parameter must be one of %s." 
% available_choose)

    n_times, _, n_features = emp_cov.shape
    K = init_precision(emp_cov, mode=init)

    if laplacian_penalty:
        obj_partial = partial(
            objective_laplacian, n_samples=n_samples, emp_cov=emp_cov,
            alpha=alpha, beta=beta, vareps=vareps)
        function_f = partial(
            loss_laplacian, beta=beta, n_samples=n_samples, S=emp_cov,
            vareps=vareps)
        gradient_f = partial(
            grad_loss_laplacian, emp_cov=emp_cov, beta=beta,
            n_samples=n_samples, vareps=vareps)
        function_g = partial(penalty_laplacian, alpha=alpha)
    else:
        psi = partial(vector_p_norm, p=time_norm)
        obj_partial = partial(
            objective, n_samples=n_samples, emp_cov=emp_cov, alpha=alpha,
            beta=beta, psi=psi, vareps=vareps)
        function_f = partial(
            loss, n_samples=n_samples, S=emp_cov, vareps=vareps)
        gradient_f = partial(
            grad_loss, emp_cov=emp_cov, n_samples=n_samples, vareps=vareps)
        function_g = partial(penalty, alpha=alpha, beta=beta, psi=psi)

    max_residual = -np.inf
    n_linesearch = 0
    checks = [convergence(obj=obj_partial(precision=K))]
    for iteration_ in range(max_iter):
        k_previous = K.copy()
        x_inv = np.array([linalg.pinvh(x) for x in K])
        grad = gradient_f(K, x_inv=x_inv)

        if choose in ['gamma', 'both']:
            gamma, y = choose_gamma(
                gamma / eps if iteration_ > 0 else gamma, K,
                function_f=function_f, beta=beta, alpha=alpha, lamda=lamda,
                grad=grad, delta=delta, eps=eps, max_iter=200, p=time_norm,
                x_inv=x_inv, choose=choose,
                laplacian_penalty=laplacian_penalty)

        x_hat = K - gamma * grad
        if choose not in ['gamma', 'both']:
            if laplacian_penalty:
                y = soft_thresholding_od(x_hat, alpha * gamma)
            else:
                y = prox_FL(
                    x_hat, beta * gamma, alpha * gamma, p=time_norm,
                    symmetric=True)

        if choose in ('lamda', 'both'):
            lamda, n_ls = choose_lamda(
                min(lamda / eps if iteration_ > 0 else lamda, 1), K,
                function_f=function_f, objective_f=obj_partial,
                gradient_f=gradient_f, function_g=function_g, gamma=gamma,
                delta=delta, eps=eps, criterion=lamda_criterion,
                max_iter=200, p=time_norm, grad=grad, prox=y, vareps=vareps)
            n_linesearch += n_ls

        K = K + min(max(lamda, 0), 1) * (y - K)
        # K, t = fista_step(Y, Y - Y_old, t)

        check = convergence(
            obj=obj_partial(precision=K),
            rnorm=np.linalg.norm(
                upper_diag_3d(K) - upper_diag_3d(k_previous)),
            snorm=np.linalg.norm(
                obj_partial(precision=K) -
                obj_partial(precision=k_previous)),
            e_pri=np.sqrt(upper_diag_3d(K).size) * tol + tol * max(
                np.linalg.norm(upper_diag_3d(K)),
                np.linalg.norm(upper_diag_3d(k_previous))),
            e_dual=tol)

        if verbose and iteration_ % (50 if verbose < 2 else 1) == 0:
            print(
                "obj: %.4f, rnorm: %.7f, snorm: %.4f,"
                "eps_pri: %.4f, eps_dual: %.4f" % check[:5])

        if return_history:
            checks.append(check)

        if np.isnan(check.rnorm) or np.isnan(check.snorm):
            warnings.warn("precision is not positive definite.")

        if stop_at is not None:
            if abs(check.obj - stop_at) / abs(stop_at) < stop_when:
                break
        else:
            # use this convergence criterion
            subgrad = (x_hat - K) / gamma
            res_norm = np.linalg.norm(K - k_previous) / gamma
            max_residual = max(max_residual, res_norm)
            normalizer = max(
                np.linalg.norm(grad), np.linalg.norm(subgrad)) + 1e-6

            r_rel = res_norm / max_residual
            r_norm = res_norm / normalizer

            if not debug and (r_rel <= tol or r_norm <= tol) \
                    and iteration_ > 0:
                break
    else:
warnings.warn("Objective did not converge.") covariance_ = np.array([linalg.pinvh(k) for k in K]) return_list = [K, covariance_] if return_history: return_list.append(checks) if return_n_iter: return_list.append(iteration_ + 1) if return_n_linesearch: return_list.append(n_linesearch) return return_list
def equality_time_graphical_lasso( S, K_init, max_iter, loss, C, rho, # n_samples=None, psi, gamma, tol, rtol, verbose, return_history, return_n_iter, mode, compute_objective, stop_at, stop_when, update_rho_options, init): """Equality constrained time-varying graphical LASSO solver. Solves the following problem via ADMM: min sum_{i=1}^T ||K_i||_{od,1} + beta sum_{i=2}^T Psi(K_i - K_{i-1}) s.t. objective = c_i for i = 1, ..., T where S_i = (1/n_i) X_i^T X_i is the empirical covariance of data matrix X (training observations by features). Parameters ---------- emp_cov : ndarray, shape (n_features, n_features) Empirical covariance of data. rho : float, optional Augmented Lagrangian parameter. max_iter : int, optional Maximum number of iterations. n_samples : ndarray Number of samples available for each time point. gamma: float, optional Kernel parameter when psi is chosen to be 'kernel'. constrained_to: float or ndarray, shape (time steps) Log likelihood constraints for K_i tol : float, optional Absolute tolerance for convergence. rtol : float, optional Relative tolerance for convergence. return_history : bool, optional Return the history of computed values. return_n_iter : bool, optional Return the number of iteration before convergence. verbose : bool, default False Print info at each iteration. update_rho_options : dict, optional Arguments for the rho update. See regain.update_rules.update_rho function for more information. compute_objective : bool, default True Choose to compute the objective value. init : {'empirical', 'zero', ndarray} Choose how to initialize the precision matrix, with the inverse empirical covariance, zero matrix or precomputed. Returns ------- K : numpy.array, 3-dimensional (T x d x d) Solution to the problem for each time t=1...T . history : list If return_history, then also a structure that contains the objective value, the primal and dual residual norms, and tolerances for the primal and dual residual norms at each iteration. """ psi, prox_psi, psi_node_penalty = check_norm_prox(psi) psi_name = psi.__name__ if loss == 'LL': loss_function = neg_logl else: loss_function = dtrace K = K_init Z_0 = K.copy() Z_1 = K.copy()[:-1] Z_2 = K.copy()[1:] u = np.zeros((S.shape[0])) U_0 = np.zeros_like(Z_0) U_1 = np.zeros_like(Z_1) U_2 = np.zeros_like(Z_2) Z_0_old = np.zeros_like(Z_0) Z_1_old = np.zeros_like(Z_1) Z_2_old = np.zeros_like(Z_2) I = np.eye(S.shape[1]) checks = [ convergence( obj=equality_objective(loss_function, S, K, C, Z_0, Z_1, Z_2, psi)) ] for iteration_ in range(max_iter): # update K A_K = U_0 - Z_0 A_K[:-1] += Z_1 - U_1 A_K[1:] += Z_2 - U_2 A_K += A_K.transpose(0, 2, 1) A_K /= 2. K = soft_thresholding_od(A_K, lamda=1. / rho) # update Z_0 residual_loss_constraint_u = loss_gen(loss_function, S, Z_0) - C + u A_Z = K + U_0 A_Z += A_Z.transpose(0, 2, 1) A_Z /= 2. if loss_function == neg_logl: A_Z -= residual_loss_constraint_u[:, None, None] * S Z_0 = np.array([ prox_logdet_constrained(_A, _a, I) for _A, _a in zip(A_Z, residual_loss_constraint_u) ]) elif loss_function == dtrace: Z_0 = np.array([ prox_dtrace_constrained(_A, _S, _a, I) for _A, _S, _a in zip(A_Z, S, residual_loss_constraint_u) ]) # other Zs A_1 = K[:-1] + U_1 A_2 = K[1:] + U_2 if not psi_node_penalty: prox_e = prox_psi(A_2 - A_1, lamda=2. 
/ rho)
            Z_1 = .5 * (A_1 + A_2 - prox_e)
            Z_2 = .5 * (A_1 + A_2 + prox_e)
        else:
            Z_1, Z_2 = prox_psi(
                np.concatenate((A_1, A_2), axis=1), lamda=.5 / rho,
                rho=rho, tol=tol, rtol=rtol, max_iter=max_iter)

        # update residuals
        residual_loss_constraint = loss_gen(loss_function, S, Z_0) - C
        u += residual_loss_constraint
        U_0 += K - Z_0
        U_1 += K[:-1] - Z_1
        U_2 += K[1:] - Z_2

        # diagnostics, reporting, termination checks
        rnorm = np.sqrt(
            np.sum(residual_loss_constraint**2) + squared_norm(K - Z_0) +
            squared_norm(K[:-1] - Z_1) + squared_norm(K[1:] - Z_2))
        snorm = rho * np.sqrt(
            squared_norm(Z_0 - Z_0_old) + squared_norm(Z_1 - Z_1_old) +
            squared_norm(Z_2 - Z_2_old))

        obj = equality_objective(
            loss_function, S, K, C, Z_0, Z_1, Z_2,
            psi) if compute_objective else np.nan

        check = convergence(
            obj=obj, rnorm=rnorm, snorm=snorm,
            e_pri=np.sqrt(Z_0.size + 2 * Z_1.size + S.shape[0]) * tol +
            rtol * max(
                np.sqrt(
                    np.sum(C**2) + squared_norm(Z_0) + squared_norm(Z_1) +
                    squared_norm(Z_2)),
                np.sqrt(
                    np.sum((residual_loss_constraint + C)**2) +
                    squared_norm(K) + squared_norm(K[:-1]) +
                    squared_norm(K[1:]))),
            e_dual=np.sqrt(Z_0.size + 2 * Z_1.size) * tol + rtol * rho *
            np.sqrt(squared_norm(U_0) + squared_norm(U_1) +
                    squared_norm(U_2)),
        )

        Z_0_old = Z_0.copy()
        Z_1_old = Z_1.copy()
        Z_2_old = Z_2.copy()

        if verbose:
            print("obj: %.4f, rnorm: %.4f, snorm: %.4f,"
                  "eps_pri: %.4f, eps_dual: %.4f" % check[:5])

        checks.append(check)
        if stop_at is not None:
            if abs(check.obj - stop_at) / abs(stop_at) < stop_when:
                break

        if check.rnorm <= check.e_pri and check.snorm <= check.e_dual:
            break

        rho_new = update_rho(rho, rnorm, snorm, iteration=iteration_,
                             **(update_rho_options or {}))
        # scaled dual variables should be also rescaled
        u *= rho / rho_new
        U_0 *= rho / rho_new
        U_1 *= rho / rho_new
        U_2 *= rho / rho_new
        rho = rho_new
    else:
        warnings.warn("Objective did not converge.")

    covariance_ = np.array([linalg.pinvh(x) for x in K])
    return_list = [K, covariance_]
    if return_history:
        return_list.append(checks)
    if return_n_iter:
        return_list.append(iteration_ + 1)
    return return_list
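# Sketch of `equality_time_graphical_lasso`: every argument is positional
# in the signature above, so the example spells them all out. C holds the
# per-time objective targets; the values used here are arbitrary.
def _example_equality_time_graphical_lasso():
    import numpy as np
    from sklearn.covariance import empirical_covariance

    rng = np.random.RandomState(0)
    n_times, n_dim = 4, 10
    S = np.array([empirical_covariance(rng.randn(50, n_dim))
                  for _ in range(n_times)])
    K_init = np.array([np.eye(n_dim)] * n_times)
    C = np.full(n_times, 10.)  # arbitrary per-time constraint targets
    out = equality_time_graphical_lasso(
        S, K_init, max_iter=100, loss='LL', C=C, rho=1., psi='laplacian',
        gamma=0.1, tol=1e-4, rtol=1e-4, verbose=False, return_history=False,
        return_n_iter=True, mode='admm', compute_objective=True,
        stop_at=None, stop_when=1e-4, update_rho_options=None,
        init='empirical')
    K, covariance_, n_iter = out
    return K, covariance_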