Example 1
def sp_det_sym(M):

    # Switch to an LL' (supernodal) factorization; cholmod.diag below requires it
    prevopts = cholmod.options['supernodal']
    cholmod.options['supernodal'] = 2

    # Obtain decomposition of M
    F = cholmod.symbolic(M)
    cholmod.numeric(M, F)

    # Restore previous options
    cholmod.options['supernodal'] = prevopts

    # Since PMP' = LL' and det(P) = +/-1 for any permutation matrix P,
    # det(M) = det(L)^2 = product(diag(L))^2
    return (np.exp(2 * sum(cvxopt.log(cholmod.diag(F)))))
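
A minimal usage sketch, assuming sp_det_sym is defined alongside the imports below; the 3x3 test matrix is made up for illustration.

import numpy as np
import cvxopt
from cvxopt import spmatrix, cholmod

# Make sure the 'supernodal' key exists, since sp_det_sym saves and restores it
cholmod.options['supernodal'] = 2

# Lower triangle of the symmetric positive definite matrix
# [[4, 1, 0], [1, 3, 0], [0, 0, 2]], whose determinant is 22
M = spmatrix([4.0, 1.0, 3.0, 2.0], [0, 1, 1, 2], [0, 0, 1, 2])
print(sp_det_sym(M))                                  # ~22.0
print(np.linalg.det(np.array([[4.0, 1.0, 0.0],
                              [1.0, 3.0, 0.0],
                              [0.0, 0.0, 2.0]])))     # cross-check: 22.0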
Example 2
def covsel(Y):
    """
    Returns the solution of

         minimize    -log det K + Tr(KY)
         subject to  K_{ij}=0,  (i,j) not in indices listed in I,J.

    Y is a symmetric sparse matrix with nonzero diagonal elements.
    I = Y.I,  J = Y.J.
    """

    I, J = Y.I, Y.J
    n, m = Y.size[0], len(I)
    N = I + J*n         # non-zero positions for one-argument indexing
    D = [k for k in range(m) if I[k]==J[k]]  # position of diagonal elements

    # starting point: symmetric identity with nonzero pattern I,J
    K = spmatrix(0.0, I, J)
    K[::n+1] = 1.0

    # Kn is used in the line search
    Kn = spmatrix(0.0, I, J)

    # symbolic factorization of K
    F = cholmod.symbolic(K)

    # Kinv will be the inverse of K
    Kinv = matrix(0.0, (n,n))

    for iters in range(100):

        # numeric factorization of K
        cholmod.numeric(K, F)
        d = cholmod.diag(F)

        # compute Kinv by solving K*X = I
        Kinv[:] = 0.0
        Kinv[::n+1] = 1.0
        cholmod.solve(F, Kinv)

        # solve Newton system
        grad = 2*(Y.V - Kinv[N])
        hess = 2*(mul(Kinv[I,J],Kinv[J,I]) + mul(Kinv[I,I],Kinv[J,J]))
        v = -grad
        lapack.posv(hess,v)

        # stopping criterion
        sqntdecr = -blas.dot(grad,v)
        print("Newton decrement squared:%- 7.5e" %sqntdecr)
        if (sqntdecr < 1e-12):
            print("number of iterations: ", iters+1)
            break

        # line search
        dx = +v
        dx[D] *= 2      # scale the diagonal elems
        f = -2.0 * sum(log(d))    # f = -log det K
        s = 1
        for lsiter in range(50):
            Kn.V = K.V + s*dx
            try:
                cholmod.numeric(Kn, F)
            except ArithmeticError:
                s *= 0.5
            else:
                d = cholmod.diag(F)
                fn = -2.0 * sum(log(d)) + 2*s*blas.dot(v,Y.V)
                if (fn < f - 0.01*s*sqntdecr):
                    break
                s *= 0.5

        K.V = Kn.V

    return K
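
A hypothetical usage sketch for covsel, assuming it is defined in a module with the imports shown; the 3x3 data is made up. Y stores the lower triangle of a symmetric sample covariance with a nonzero diagonal, and the missing (2,0) entry becomes the constraint K[2,0] = 0. cholmod.diag requires an LL' (supernodal) factorization, which is set explicitly here.

from cvxopt import spmatrix, matrix, mul, log, blas, lapack, cholmod

cholmod.options['supernodal'] = 2     # LL' factorization, needed by cholmod.diag

# Lower triangle of [[3, 0.5, 0], [0.5, 2, 0.5], [0, 0.5, 4]]
Y = spmatrix([3.0, 0.5, 2.0, 0.5, 4.0],
             [0, 1, 1, 2, 2],
             [0, 0, 1, 1, 2])
K = covsel(Y)
print(matrix(K))     # dense view of the sparse maximum-likelihood estimate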
Example 3
def PLS2D(theta, ZtX, ZtY, XtX, ZtZ, XtY, YtX, YtZ, XtZ, YtY, n, P, I, tinds,
          rinds, cinds):

    # Obtain Lambda
    Lambda = mapping2D(theta, tinds, rinds, cinds)

    # Obtain Lambda'
    Lambdat = spmatrix.trans(Lambda)

    # Obtain Lambda'Z'Y and Lambda'Z'X
    LambdatZtY = Lambdat * ZtY
    LambdatZtX = Lambdat * ZtX

    # Obtain the Cholesky decomposition of Lambda'Z'Z Lambda + I
    LambdatZtZLambda = Lambdat * (ZtZ * Lambda)
    chol_dict = sparse_chol2D(LambdatZtZLambda + I,
                              perm=P,
                              retF=True,
                              retP=False,
                              retL=False)
    F = chol_dict['F']

    # Obtain C_u (note that cholmod.solve overwrites its second argument,
    # whereas spsolve returns the solution)
    Cu = LambdatZtY[P, :]
    cholmod.solve(F, Cu, sys=4)

    # Obtain RZX
    RZX = LambdatZtX[P, :]
    cholmod.solve(F, RZX, sys=4)

    # Obtain RXtRX
    RXtRX = XtX - matrix.trans(RZX) * RZX

    # Obtain beta estimates (note: posv and gesv also overwrite their
    # second argument)
    betahat = XtY - matrix.trans(RZX) * Cu
    try:
        lapack.posv(RXtRX, betahat)
    except ArithmeticError:
        # posv fails if RXtRX is not positive definite; fall back to an LU solve
        lapack.gesv(RXtRX, betahat)

    # Obtain u estimates
    uhat = Cu - RZX * betahat
    cholmod.solve(F, uhat, sys=5)
    cholmod.solve(F, uhat, sys=8)

    # Obtain b estimates
    bhat = Lambda * uhat

    # Obtain residual sum of squares
    resss = YtY - 2 * YtX * betahat - 2 * YtZ * bhat + 2 * matrix.trans(
        betahat) * XtZ * bhat + matrix.trans(
            betahat) * XtX * betahat + matrix.trans(bhat) * ZtZ * bhat

    # Obtain penalised residual sum of squares
    pss = resss + matrix.trans(uhat) * uhat

    # Obtain log(|L|^2)
    logdet = 2 * sum(cvxopt.log(cholmod.diag(F)))

    # Obtain log likelihood
    logllh = -logdet / 2 - n / 2 * (1 + np.log(2 * np.pi * pss[0, 0]) -
                                    np.log(n))

    return (-logllh)
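
The comments above point out that cholmod.solve (like lapack.posv and lapack.gesv) overwrites its right-hand-side argument instead of returning a new matrix, unlike scipy's spsolve. A small self-contained sketch of that convention, using a made-up 2x2 system:

from cvxopt import matrix, spmatrix, cholmod

# Lower triangle of the SPD matrix [[4, 1], [1, 3]]
A = spmatrix([4.0, 1.0, 3.0], [0, 1, 1], [0, 0, 1])
b = matrix([1.0, 2.0])

F = cholmod.symbolic(A)
cholmod.numeric(A, F)
cholmod.solve(F, b)     # b is overwritten in place with the solution of A x = b
print(b)                # approximately [0.0909, 0.6364]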
Example 4
def covsel(Y):
    """
    Returns the solution of
 
        minimize    -log det K + tr(KY)
        subject to  K_ij = 0  if (i,j) not in zip(I, J).

    Y is a symmetric sparse matrix with nonzero diagonal elements.
    I = Y.I,  J = Y.J.
    """

    cholmod.options['supernodal'] = 2

    I, J = Y.I, Y.J
    n, m = Y.size[0], len(I) 
    # non-zero positions for one-argument indexing 
    N = I + J*n         
    # position of diagonal elements
    D = [ k for k in range(m) if I[k]==J[k] ]  

    # starting point: symmetric identity with nonzero pattern I,J
    K = spmatrix(0.0, I, J) 
    K[::n+1] = 1.0

    # Kn is used in the line search
    Kn = spmatrix(0.0, I, J)

    # symbolic factorization of K 
    F = cholmod.symbolic(K)

    # Kinv will be the inverse of K
    Kinv = matrix(0.0, (n,n))

    for iters in range(100):

        # numeric factorization of K
        cholmod.numeric(K, F)
        d = cholmod.diag(F)

        # compute Kinv by solving K*X = I 
        Kinv[:] = 0.0
        Kinv[::n+1] = 1.0
        cholmod.solve(F, Kinv)
        
        # solve Newton system
        grad = 2 * (Y.V - Kinv[N])
        hess = 2 * ( mul(Kinv[I,J], Kinv[J,I]) + 
               mul(Kinv[I,I], Kinv[J,J]) )
        v = -grad
        lapack.posv(hess,v) 
                                                  
        # stopping criterion
        sqntdecr = -blas.dot(grad,v) 
        print("Newton decrement squared:%- 7.5e" %sqntdecr)
        if (sqntdecr < 1e-12):
            print("number of iterations: %d" %(iters+1))
            break

        # line search
        dx = +v
        dx[D] *= 2      
        f = -2.0*sum(log(d))      # f = -log det K
        s = 1
        for lsiter in range(50):
            Kn.V = K.V + s*dx
            try: 
                cholmod.numeric(Kn, F)
            except ArithmeticError: 
                s *= 0.5
            else:
                d = cholmod.diag(F)
                fn = -2.0 * sum(log(d)) + 2*s*blas.dot(v,Y.V)
                if (fn < f - 0.01*s*sqntdecr): break
                else: s *= 0.5

        K.V = Kn.V

    return K
Example 5
def PLS(theta, ZtX, ZtY, XtX, ZtZ, XtY, YtX, YtZ, XtZ, YtY, P, tinds, rinds,
        cinds):

    # Obtain Lambda from theta
    Lambda = mapping(theta, tinds, rinds, cinds)

    # Obtain Lambda'
    Lambdat = spmatrix.trans(Lambda)

    # Obtain Lambda'Z'Y and Lambda'Z'X
    LambdatZtY = Lambdat * ZtY
    LambdatZtX = Lambdat * ZtX

    # Set the factorisation to use LL' instead of LDL'
    cholmod.options['supernodal'] = 2

    # Obtain the Cholesky decomposition of Lambda'Z'Z Lambda + I
    LambdatZtZLambda = Lambdat * ZtZ * Lambda
    I = spmatrix(1.0, range(Lambda.size[0]), range(Lambda.size[0]))
    chol_dict = sparse_chol(LambdatZtZLambda + I,
                            perm=P,
                            retF=True,
                            retP=False,
                            retL=False)
    F = chol_dict['F']

    # Obtain C_u (note that cholmod.solve overwrites its second argument,
    # whereas spsolve returns the solution)
    Cu = LambdatZtY[P, :]
    cholmod.solve(F, Cu, sys=4)

    # Obtain RZX
    RZX = LambdatZtX[P, :]
    cholmod.solve(F, RZX, sys=4)

    # Obtain RXtRX
    RXtRX = XtX - matrix.trans(RZX) * RZX

    # Obtain beta estimates (note: posv also overwrites its second argument)
    betahat = XtY - matrix.trans(RZX) * Cu
    lapack.posv(RXtRX, betahat)

    # Obtain u estimates
    uhat = Cu - RZX * betahat
    cholmod.solve(F, uhat, sys=5)
    cholmod.solve(F, uhat, sys=8)

    # Obtain b estimates
    bhat = Lambda * uhat

    # Obtain residual sum of squares
    resss = YtY - 2 * YtX * betahat - 2 * YtZ * bhat + 2 * matrix.trans(
        betahat) * XtZ * bhat + matrix.trans(
            betahat) * XtX * betahat + matrix.trans(bhat) * ZtZ * bhat

    # Obtain penalised residual sum of squares
    pss = resss + matrix.trans(uhat) * uhat

    # Obtain log(|L|^2); this method only works for symmetric decompositions
    # (tr(R_X)^2 would also be needed for REML)
    logdet = 2 * sum(cvxopt.log(cholmod.diag(F)))

    # Obtain log likelihood (X, the fixed-effects design matrix, is assumed
    # to be available in the enclosing scope; X.size[0] is the number of
    # observations)
    logllh = -logdet / 2 - X.size[0] / 2 * (1 + np.log(2 * np.pi * pss) -
                                            np.log(X.size[0]))

    return (-logllh[0, 0])
Example 6
def PeLS2D(theta, ZtX, ZtY, XtX, ZtZ, XtY, YtX, YtZ, XtZ, YtY, n, P, I, tinds,
           rinds, cinds):

    # Obtain Lambda
    Lambda = mapping2D(theta, tinds, rinds, cinds)

    # Obtain Lambda'
    Lambdat = spmatrix.trans(Lambda)

    # Obtain Lambda'Z'Y and Lambda'Z'X
    LambdatZtY = Lambdat * ZtY
    LambdatZtX = Lambdat * ZtX

    # Obtain the cholesky decomposition
    LambdatZtZLambda = Lambdat * (ZtZ * Lambda)
    chol_dict = sparse_chol2D(LambdatZtZLambda + I,
                              perm=P,
                              retF=True,
                              retP=False,
                              retL=False)
    F = chol_dict['F']

    # Obtain C_u (note that cholmod.solve overwrites its second argument,
    # whereas spsolve returns the solution)
    Cu = LambdatZtY[P, :]
    cholmod.solve(F, Cu, sys=4)

    # Obtain RZX
    RZX = LambdatZtX[P, :]
    cholmod.solve(F, RZX, sys=4)

    # Obtain RXtRX
    RXtRX = XtX - matrix.trans(RZX) * RZX

    # Obtain beta estimates (note: posv and gesv also overwrite their
    # second argument)
    betahat = XtY - matrix.trans(RZX) * Cu
    try:
        lapack.posv(RXtRX, betahat)
    except ArithmeticError:
        # posv fails if RXtRX is not positive definite; fall back to an LU solve
        lapack.gesv(RXtRX, betahat)

    # Obtain u estimates
    uhat = Cu - RZX * betahat
    cholmod.solve(F, uhat, sys=5)
    cholmod.solve(F, uhat, sys=8)

    # Obtain b estimates
    bhat = Lambda * uhat

    # Obtain residual sum of squares
    resss = YtY - 2 * YtX * betahat - 2 * YtZ * bhat + 2 * matrix.trans(
        betahat) * XtZ * bhat + matrix.trans(
            betahat) * XtX * betahat + matrix.trans(bhat) * ZtZ * bhat

    # Obtain penalised residual sum of squares
    pss = resss + matrix.trans(uhat) * uhat

    # Obtain Log(|L|^2)
    logdet = 2 * sum(cvxopt.log(cholmod.diag(F)))

    # Obtain log likelihood
    logllh = -logdet / 2 - n / 2 * (1 + np.log(2 * np.pi * pss[0, 0]) -
                                    np.log(n))

    return (-logllh)
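
The try/except above falls back from a Cholesky-based solve to a general LU solve. A small sketch of that pattern with a made-up 2x2 system: lapack.posv raises ArithmeticError when its coefficient matrix is not positive definite, while lapack.gesv only requires it to be nonsingular. Copies (+A) are passed because both routines overwrite the coefficient matrix, and a failed posv call may already have partially overwritten it.

from cvxopt import matrix, lapack

A = matrix([[1.0, 2.0], [2.0, 1.0]])    # symmetric and nonsingular, but indefinite
b = matrix([1.0, 1.0])

try:
    lapack.posv(+A, b)                  # fails: A is not positive definite
except ArithmeticError:
    lapack.gesv(+A, b)                  # general solve; b becomes A^{-1} b
print(b)                                # approximately [0.333, 0.333]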
Example 7
# A, B, x and the symbolic factorization Fs are assumed to come from the
# surrounding script: A and B are square sparse matrices with the same
# sparsity pattern, x is a dense right-hand side, and Fs = umfpack.symbolic(A),
# so the symbolic factorization can be reused for both matrices.
FA = umfpack.numeric(A, Fs)
FB = umfpack.numeric(B, Fs)
umfpack.solve(A, FA, x)
umfpack.solve(B, FB, x)
umfpack.solve(A, FA, x, trans='T')
print(x)

A = spmatrix([10, 3, 5, -2, 5, 2], [0, 2, 1, 3, 2, 3], [0, 0, 1, 1, 2, 3])
X = matrix(list(range(8)), (4, 2), 'd')
cholmod.linsolve(A, X)
print(X)

X = cholmod.splinsolve(A, spmatrix(1.0, range(4), range(4)))
print(X)

X = matrix(list(range(8)), (4, 2), 'd')
F = cholmod.symbolic(A)
cholmod.numeric(A, F)
cholmod.solve(F, X)
print(X)

F = cholmod.symbolic(A)
cholmod.numeric(A, F)
# With an LL' (supernodal) factorization, log(det(A)) = 2*sum(log(diag(L)))
print(2.0 * sum(log(cholmod.diag(F))))
cholmod.options['supernodal'] = 0    # switch to an LDL' factorization
F = cholmod.symbolic(A)
cholmod.numeric(A, F)
# With A = L*D*L' and L unit lower triangular, log(det(A)) = sum(log(diag(D))).
# Solving D*x = 1 (sys=6) gives x_i = 1/d_i, so -sum(log(x)) = sum(log(d_i)).
Di = matrix(1.0, (4, 1))
cholmod.solve(F, Di, sys=6)
print(-sum(log(Di)))
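
Both values printed above are the log-determinant of the symmetric matrix whose lower triangle A stores: first from the diagonal of the LL' factor, then from the diagonal D of the LDL' factorization. A quick cross-check with NumPy (an extra dependency assumed only for this check):

import numpy as np

Ad = np.array([[10.0, 0.0, 3.0, 0.0],
               [0.0, 5.0, 0.0, -2.0],
               [3.0, 0.0, 5.0, 0.0],
               [0.0, -2.0, 0.0, 2.0]])
print(np.log(np.linalg.det(Ad)))        # ~5.505, matching both values above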