Пример #1
0
def grad_dense(R_hat, U_hat, rows, cols, D, P):
    """Gradient of the dense log-likelihood fit in the (D, P) parametrization.

    Builds the dense precision matrix from its diagonal D and the free
    off-diagonal entries P at positions (rows, cols), inverts it, and
    returns the residuals against the targets (R_hat, U_hat).

    Returns:
        (grad_D, grad_P): gradient w.r.t. the diagonal and the off-diagonal
        entries; the off-diagonal part carries a factor of 2 for symmetry.
    """
    precision = build_dense(D, P, rows, cols)
    covariance = la.inv(precision)
    diag_entries = covariance.diagonal()
    off_entries = covariance[rows, cols]
    grad_diag = R_hat - diag_entries
    grad_off = 2 * (U_hat - off_entries)
    return (grad_diag, grad_off)
Пример #2
0
def hessian_dir_dense(R_hat, U_hat, rows, cols, D, P):
    """Dense Newton direction for the (D, P) parametrization.

    Forms the full Hessian via the Kronecker identity
    d(X^{-1}) = -X^{-1} dX X^{-1}  =>  H = W kron W restricted to the
    free entries (diagonal + pattern (rows, cols)), solves H v = g for the
    gradient g, and returns the descent direction -v split back into its
    diagonal and off-diagonal parts.
  """
    X = build_dense(D, P, rows, cols)
    W = la.inv(X)
    n = len(D)
    # Flat (row-major) indices of the n diagonal entries of an n x n matrix.
    A = n * np.arange(n) + np.arange(n)
    # Flat indices of the free off-diagonal entries at (rows, cols).
    B = n * rows + cols
    # Full Hessian of the log-partition term; O(n^4) memory — dense/debug use only.
    K = np.kron(W, W)
    # todo: finish here
    # NOTE(review): off-diagonal blocks use 2*K[A,B] and its transpose; the
    # factors 2 and 4 mirror the symmetry factor in grad_dense — presumably
    # correct for the symmetric parametrization, but unverified here.
    H = np.vstack(
        (np.hstack((K[np.ix_(A, A)], 2 * K[np.ix_(A, B)])), np.hstack((2 * K[np.ix_(A, B)].T, 4 * K[np.ix_(B, B)])))
    )
    (g_D, g_P) = grad_dense(R_hat, U_hat, rows, cols, D, P)
    g = np.hstack((g_D, g_P))
    v = la.solve(H, g)
    # First n components correspond to the diagonal, the rest to the pattern.
    v_D = v[:n]
    v_P = v[n:]
    # Negate: Newton step moves against the solved direction.
    return (-v_D, -v_P)
Пример #3
0
def covsel_quick(R, U, rows, cols, lbda=1e6):
  """Covariance selection restricted to a sparsity pattern, via QUIC.

  Builds a dense empirical covariance from (R, U) on the pattern
  (rows, cols), shifts the diagonal if the matrix is not PSD, and runs
  QUIC with an L1 penalty that is huge (lbda) everywhere except the
  diagonal and the allowed pattern — effectively constraining the
  estimated precision matrix to that pattern.

  Returns:
      (D, P): diagonal and pattern entries of the estimated precision.
  """
  min_ei = smallest_ev_arpack(R, U, rows, cols)
  if min_ei < 0:
    # Fix: Python 2 print statements are syntax errors under Python 3;
    # single-argument print(...) is valid in both.
    print("min_ei is %f" % min_ei)
    # Shift the diagonal so the matrix becomes strictly positive definite.
    R0 = R - min_ei + 1e-3
  else:
    R0 = R
  n = len(R)
  S = build_dense(R0, U, rows, cols)

  # Penalty matrix: lbda everywhere, zero on the diagonal and on the
  # (symmetric) free pattern, so only those entries may be nonzero.
  L = lbda * np.ones((n, n), dtype=np.double)
  L[np.arange(n), np.arange(n)] = 0
  L[rows, cols] = 0
  L[cols, rows] = 0

  (X_, W_, opt, time) = run_quic(S, L)
  D = X_.diagonal()
  P = X_[rows, cols]
  R2 = W_.diagonal()
  U2 = W_[rows, cols]
  # Reconstruction error of the estimated covariance against the input.
  print("Quic: Error is %f diag, %f outer" % (linalg.norm(R0 - R2), linalg.norm(U - U2)))
  return (D, P)
Пример #4
0
def is_psd_dense(R, U, rows, cols, tolerance=1e-4):
  """Positive-definiteness check via a dense eigendecomposition.

  Builds the dense symmetric matrix from its diagonal R and pattern
  entries U at (rows, cols) and requires every eigenvalue to exceed
  the tolerance.
  """
  matrix = build_dense(R, U, rows, cols)
  return (linalg.eigvalsh(matrix) > tolerance).all()
Пример #5
0
from mm.arterial_hkt.gmrf_learning.cvx import run_cvx_dense, covsel_cvx_dense,\
  covsel_cvx_cholmod
from mm.arterial_hkt.gmrf_learning.quic_cpp.low_rank import random_projection_cholmod,\
  random_projection_cholmod_csc
from scikits.sparse.cholmod import analyze

def star(n, diag):
  """Build a star-graph test problem: node 0 connected to nodes 1..n-1.

  Args:
      n: number of nodes.
      diag: base value for the diagonal entries.

  Returns:
      (D, P, rows, cols): diagonal values, edge values, and the edge
      index arrays (all edges incident to node 0).
  """
  m = n - 1
  D = diag * np.ones(n, dtype=np.double) + np.arange(n) / float(n)
  P = np.arange(m) / float(m) + 1
  # Fix: np.int was deprecated in NumPy 1.20 and removed in 1.24; it was
  # an alias for the builtin int, which is the exact drop-in replacement.
  rows = np.zeros((m,), dtype=int)
  cols = np.arange(1, n, dtype=int)
  return (D, P, rows, cols)

# Smoke-test script: build a small star-graph problem and exercise the
# dense/sparse builders, the logdet implementations, and the randomized
# low-rank projection.
(D,P,rows,cols) = star(5,4)
X = build_dense(D, P, rows, cols)
Xs = build_sparse(D, P, rows, cols)

# The three logdet implementations should agree on this PSD matrix.
l1 = logdet_dense(D, P, rows, cols)
l2 = logdet_dense_chol(D, P, rows, cols)
l3 = logdet_cholmod(D, P, rows, cols)

(M,Dn,Pn) = normalized_problem(D, P, rows, cols)

test_data(D, P, rows, cols)
W = la.inv(X)
#Q = random_projection_cholmod(D, U, rows, cols, k, factor)
# Random projection of the sparse matrix; rows of A are the projected samples.
Q = random_projection_cholmod_csc(Xs, k=1000)
A = Q.T
# NOTE(review): Python 2 print statement — a syntax error under Python 3.
print A.shape
# Row-wise squared norms: Monte-Carlo estimate of the diagonal of the inverse.
R = np.sum(A*A,axis=1)
Пример #6
0
def inv_dense(R, U, rows, cols):
  """Invert the dense matrix built from (R, U) and read back its
  diagonal and the entries on the pattern (rows, cols).
  """
  precision = build_dense(R, U, rows, cols)
  covariance = linalg.inv(precision)
  diag_part = covariance.diagonal()
  pattern_part = covariance[rows, cols]
  return (diag_part, pattern_part)
Пример #7
0
def logdet_dense_chol(R, U, rows, cols):
  """Log-determinant of the dense matrix built from (R, U), via Cholesky.

  Returns -inf when the matrix is not positive definite (the
  log-determinant is undefined there and -inf is a safe sentinel for
  maximization).
  """
  if not is_psd_dense(R, U, rows, cols):
    # Fix: np.Inf alias was removed in NumPy 2.0; np.inf is the
    # canonical spelling and identical in value.
    return -np.inf
  X = build_dense(R, U, rows, cols)
  # logdet(X) = 2 * sum(log(diag(chol(X)))).
  C = linalg.cholesky(X).diagonal()
  return np.sum(np.log(C)) * 2
Пример #8
0
def logdet_dense(R, U, rows, cols):
  """Log-determinant of the dense matrix built from (R, U), via the
  eigenvalue spectrum.

  Returns -inf when the matrix is not positive definite, matching the
  convention of logdet_dense_chol.
  """
  if not is_psd_dense(R, U, rows, cols):
    # Fix: np.Inf alias was removed in NumPy 2.0; np.inf is the
    # canonical spelling and identical in value.
    return -np.inf
  X = build_dense(R, U, rows, cols)
  eis = linalg.eigvalsh(X)
  return np.sum(np.log(eis))