Example #1
def PLS2D_getBeta(theta, ZtX, ZtY, XtX, ZtZ, XtY, YtX, YtZ, XtZ, YtY, n, P,
                  tinds, rinds, cinds):

    # Obtain Lambda
    Lambda = mapping2D(theta, tinds, rinds, cinds)

    # Obtain Lambda'
    Lambdat = spmatrix.trans(Lambda)

    # Obtain Lambda'Z'Y and Lambda'Z'X
    LambdatZtY = Lambdat * ZtY
    LambdatZtX = Lambdat * ZtX

    # Set the factorisation to use LL' instead of LDL'
    cholmod.options['supernodal'] = 2

    # Obtain the cholesky decomposition
    LambdatZtZLambda = Lambdat * ZtZ * Lambda
    I = spmatrix(1.0, range(Lambda.size[0]), range(Lambda.size[0]))
    chol_dict = sparse_chol2D(LambdatZtZLambda + I,
                              perm=P,
                              retF=True,
                              retP=False,
                              retL=False)
    F = chol_dict['F']

    # Obtain C_u (annoyingly solve writes over the second argument,
    # whereas spsolve outputs)
    Cu = LambdatZtY[P, :]
    cholmod.solve(F, Cu, sys=4)

    # Obtain RZX
    RZX = LambdatZtX[P, :]
    cholmod.solve(F, RZX, sys=4)

    # Obtain RXtRX
    RXtRX = XtX - matrix.trans(RZX) * RZX

    # Obtain beta estimates (note: gesv also replaces the second
    # argument)
    betahat = XtY - matrix.trans(RZX) * Cu
    try:
        lapack.posv(RXtRX, betahat)
    except:
        lapack.gesv(RXtRX, betahat)

    return (betahat)
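
As the comments note, cholmod.solve and the LAPACK routines overwrite their second argument in place rather than returning the solution. A minimal illustration of that behaviour with lapack.posv on a made-up 2x2 positive-definite system:

from cvxopt import matrix, lapack

# Made-up positive-definite system, for illustration only
A = matrix([[4.0, 1.0], [1.0, 3.0]])
b = matrix([1.0, 2.0])

# posv solves A*x = b in place: on return b holds the solution and
# A holds its Cholesky factor (gesv behaves the same way for general A)
lapack.posv(A, b)
print(b)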
Example #2
 def linear_prog(self):
     A = matrix(self.a)
     B = matrix(self.b)
     C = matrix(self.c)
     A = matrix.trans(A)
     A = A * 1.0
     B = B * 1.0
     C = C * 1.0
     sol = solvers.lp(C, A, B)
     return sol['x']
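
cvxopt's solvers expect 'd' (double) matrices, which is presumably why the snippet multiplies each operand by 1.0 after building it, and matrix.trans is applied presumably because self.a stores the constraints row-wise while the list-of-lists matrix() constructor reads columns. A minimal self-contained sketch of the same pattern with made-up numbers:

from cvxopt import matrix, solvers

# Made-up LP: minimize c'x subject to G x <= h
G_rows = matrix([[1.0, 2.0], [3.0, 1.0]])   # each inner list becomes a column
G = matrix.trans(G_rows) * 1.0              # the * 1.0 cast matters when the data are ints
h = matrix([4.0, 5.0])
c = matrix([-1.0, -1.0])

sol = solvers.lp(c, G, h)
print(sol['x'])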
Example #3
 def fit(self, X, Y):
     d = len(X[0])
     n = len(X)
     p = matrix(np.identity(d + 1))
     p[0, 0] = 0
     q = matrix(np.zeros(d + 1))
     g = matrix(-np.diag(Y) * np.matrix(np.append(np.ones((n, 1)), X, axis=1)))
     h = matrix(-np.ones(n))
     sol = solvers.qp(p, q, g, h)['x']
     self.b = sol[0]
     self.w = np.array(matrix.trans(sol[1:, :]))
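
The QP assembled in fit is the hard-margin SVM primal: minimise (1/2)||w||^2 over (b, w) subject to y_i (w.x_i + b) >= 1, encoded as -diag(Y) [1 X] [b; w] <= -1. A usage sketch on a tiny made-up, linearly separable dataset (the class that owns fit is not shown, so the same QP is assembled directly):

import numpy as np
from cvxopt import matrix, solvers

# Tiny made-up, linearly separable dataset
X = np.array([[1.0, 1.0], [2.0, 2.0], [-1.0, -1.0], [-2.0, -1.0]])
Y = np.array([1.0, 1.0, -1.0, -1.0])
n, d = X.shape

p = matrix(np.identity(d + 1))
p[0, 0] = 0                                   # the bias term is not penalised
q = matrix(np.zeros(d + 1))
g = matrix(-np.diag(Y) @ np.append(np.ones((n, 1)), X, axis=1))
h = matrix(-np.ones(n))

sol = solvers.qp(p, q, g, h)['x']
b_hat = sol[0]
w_hat = np.array(matrix.trans(sol[1:, :]))
print(b_hat, w_hat)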
Example #4
for i in range(w):
    for j in range(h):
        p_array[i][j] = y[i] * y[j] * kernel(x_data[i], x_data[j])

P = matrix(numpy.asarray(p_array), tc='d')
q = matrix([-1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0])
G = matrix([[-1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
            [0.0, -1.0, 0.0, 0.0, 0.0, 0.0, 0.0],
            [0.0, 0.0, -1.0, 0.0, 0.0, 0.0, 0.0],
            [0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0],
            [0.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0],
            [0.0, 0.0, 0.0, 0.0, 0.0, -1.0, 0.0],
            [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -1.0]])
h = matrix([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0])
A = matrix.trans(matrix(numpy.asarray(y), tc='d'))
b = matrix([0.0])

sol = solvers.qp(P, q, G, h, A, b)

#print(P, q, G, h, A, b)

print("Solution (alpha):\n", sol['x'])

alpha_opt = [i for i in sol['x']]
print(alpha_opt)

alpha_svm = []

for i in alpha_opt:
    if i < 10e-07:
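
For reference, the 7-point blocks written out by hand above can be generated for any n. A sketch with made-up data and a plain dot-product kernel (x_data, y and kernel here are illustrative stand-ins for whatever the original script defines):

import numpy
from cvxopt import matrix, solvers

# Made-up data; a linear kernel stands in for the script's kernel()
x_data = numpy.array([[0.0, 1.0], [1.0, 0.0], [2.0, 2.0], [3.0, 3.0]])
y = numpy.array([1.0, 1.0, -1.0, -1.0])
n = len(y)

def kernel(a, b):
    return float(numpy.dot(a, b))

# p_array[i][j] = y_i * y_j * k(x_i, x_j), as in the double loop above
p_array = numpy.array([[y[i] * y[j] * kernel(x_data[i], x_data[j])
                        for j in range(n)] for i in range(n)])

P = matrix(p_array, tc='d')
q = matrix(-numpy.ones(n))                            # -1 for each alpha_i
G = matrix(-numpy.eye(n))                             # -alpha_i <= 0, i.e. alpha_i >= 0
h = matrix(numpy.zeros(n))
A = matrix.trans(matrix(numpy.asarray(y), tc='d'))    # equality row: sum_i alpha_i y_i = 0
b = matrix([0.0])

sol = solvers.qp(P, q, G, h, A, b)
print(sol['x'])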
Example #5
def PLS2D_getSigma2(theta, ZtX, ZtY, XtX, ZtZ, XtY, YtX, YtZ, XtZ, YtY, n, P,
                    I, tinds, rinds, cinds):

    # Obtain Lambda
    #t1 = time.time()
    Lambda = mapping2D(theta, tinds, rinds, cinds)
    #t2 = time.time()
    #print(t2-t1)#3.170967102050781e-05   9

    # Obtain Lambda'
    #t1 = time.time()
    Lambdat = spmatrix.trans(Lambda)
    #t2 = time.time()
    #print(t2-t1)# 3.5762786865234375e-06

    # Obtain Lambda'Z'Y and Lambda'Z'X
    #t1 = time.time()
    LambdatZtY = Lambdat * ZtY
    LambdatZtX = Lambdat * ZtX
    #t2 = time.time()
    #print(t2-t1)#1.049041748046875e-05   13

    # Obtain the cholesky decomposition
    #t1 = time.time()
    LambdatZtZLambda = Lambdat * (ZtZ * Lambda)
    #t2 = time.time()
    #print(t2-t1)#3.790855407714844e-05   2

    #t1 = time.time()
    chol_dict = sparse_chol2D(LambdatZtZLambda + I,
                              perm=P,
                              retF=True,
                              retP=False,
                              retL=False)
    F = chol_dict['F']
    #t2 = time.time()
    #print(t2-t1)#0.0001342296600341797   1

    # Obtain C_u (annoyingly solve writes over the second argument,
    # whereas spsolve outputs)
    #t1 = time.time()
    Cu = LambdatZtY[P, :]
    cholmod.solve(F, Cu, sys=4)
    #t2 = time.time()
    #print(t2-t1)#1.5974044799804688e-05   5

    # Obtain RZX
    #t1 = time.time()
    RZX = LambdatZtX[P, :]
    cholmod.solve(F, RZX, sys=4)
    #t2 = time.time()
    #print(t2-t1)#1.2159347534179688e-05   7

    # Obtain RXtRX
    #t1 = time.time()
    RXtRX = XtX - matrix.trans(RZX) * RZX
    #t2 = time.time()
    #print(t2-t1)#9.775161743164062e-06  11

    # Obtain beta estimates (note: gesv also replaces the second
    # argument)
    #t1 = time.time()
    betahat = XtY - matrix.trans(RZX) * Cu
    try:
        lapack.posv(RXtRX, betahat)
    except:
        lapack.gesv(RXtRX, betahat)
    #t2 = time.time()
    #print(t2-t1)#1.7404556274414062e-05   6

    # Obtain u estimates
    #t1 = time.time()
    uhat = Cu - RZX * betahat
    cholmod.solve(F, uhat, sys=5)
    cholmod.solve(F, uhat, sys=8)
    #t2 = time.time()
    #print(t2-t1)#1.2874603271484375e-05   8

    # Obtain b estimates
    #t1 = time.time()
    bhat = Lambda * uhat
    #t2 = time.time()
    #print(t2-t1)#2.86102294921875e-06  15

    # Obtain residuals sum of squares
    #t1 = time.time()
    resss = YtY - 2 * YtX * betahat - 2 * YtZ * bhat + 2 * matrix.trans(
        betahat) * XtZ * bhat + matrix.trans(
            betahat) * XtX * betahat + matrix.trans(bhat) * ZtZ * bhat
    #t2 = time.time()
    #print(t2-t1)#3.409385681152344e-05   4

    # Obtain penalised residual sum of squares
    #t1 = time.time()
    pss = resss + matrix.trans(uhat) * uhat

    return (pss / n)
Example #6
def PLS(theta, ZtX, ZtY, XtX, ZtZ, XtY, YtX, YtZ, XtZ, YtY, P, tinds, rinds,
        cinds):

    #t1 = time.time()
    # Obtain Lambda from theta
    Lambda = mapping(theta, tinds, rinds, cinds)
    #t2 = time.time()
    #print(t2-t1)

    #t1 = time.time()
    # Obtain Lambda'
    Lambdat = spmatrix.trans(Lambda)
    #t2 = time.time()
    #print(t2-t1)

    #t1 = time.time()
    LambdatZtY = Lambdat * ZtY
    #t2 = time.time()
    #print(t2-t1)

    #t1 = time.time()
    LambdatZtX = Lambdat * ZtX
    #t2 = time.time()
    #print(t2-t1)

    #t1 = time.time()
    # Set the factorisation to use LL' instead of LDL'
    cholmod.options['supernodal'] = 2
    #t2 = time.time()
    #print(t2-t1)

    # Obtain L
    #t1 = time.time()
    LambdatZtZLambda = Lambdat * ZtZ * Lambda
    #t2 = time.time()
    #print(t2-t1)

    #t1 = time.time()
    I = spmatrix(1.0, range(Lambda.size[0]), range(Lambda.size[0]))
    #t2 = time.time()
    #print(t2-t1)

    #t1 = time.time()
    chol_dict = sparse_chol(LambdatZtZLambda + I,
                            perm=P,
                            retF=True,
                            retP=False,
                            retL=False)
    #t2 = time.time()
    #print(t2-t1)

    #t1 = time.time()
    F = chol_dict['F']
    #t2 = time.time()
    #print(t2-t1)

    # Obtain C_u (annoyingly solve writes over the second argument,
    # whereas spsolve outputs)
    #t1 = time.time()
    Cu = LambdatZtY[P, :]
    #t2 = time.time()
    #print(t2-t1)

    #t1 = time.time()
    cholmod.solve(F, Cu, sys=4)
    #t2 = time.time()
    #print(t2-t1)

    # Obtain RZX
    #t1 = time.time()
    RZX = LambdatZtX[P, :]
    #t2 = time.time()
    #print(t2-t1)

    #t1 = time.time()
    cholmod.solve(F, RZX, sys=4)
    #t2 = time.time()
    #print(t2-t1)

    # Obtain RXtRX
    #t1 = time.time()
    RXtRX = XtX - matrix.trans(RZX) * RZX
    #t2 = time.time()
    #print(t2-t1)

    #print(RXtRX.size)
    #print(X.size)
    #print(Y.size)
    #print(RZX.size)
    #print(Cu.size)

    # Obtain beta estimates (note: gesv also replaces the second
    # argument)
    #t1 = time.time()
    betahat = XtY - matrix.trans(RZX) * Cu
    #t2 = time.time()
    #print(t2-t1)

    #t1 = time.time()
    lapack.posv(RXtRX, betahat)
    #t2 = time.time()
    #print(t2-t1)

    # Obtain u estimates
    #t1 = time.time()
    uhat = Cu - RZX * betahat
    #t2 = time.time()
    #print(t2-t1)

    #t1 = time.time()
    cholmod.solve(F, uhat, sys=5)
    #t2 = time.time()
    #print(t2-t1)

    #t1 = time.time()
    cholmod.solve(F, uhat, sys=8)
    #t2 = time.time()
    #print(t2-t1)

    # Obtain b estimates
    #t1 = time.time()
    bhat = Lambda * uhat
    #t2 = time.time()
    #print(t2-t1)

    # Obtain residuals sum of squares
    #t1 = time.time()
    resss = YtY - 2 * YtX * betahat - 2 * YtZ * bhat + 2 * matrix.trans(
        betahat) * XtZ * bhat + matrix.trans(
            betahat) * XtX * betahat + matrix.trans(bhat) * ZtZ * bhat
    #t2 = time.time()
    #print(t2-t1)

    # Obtain penalised residual sum of squares
    #t1 = time.time()
    pss = resss + matrix.trans(uhat) * uhat
    #t2 = time.time()
    #print(t2-t1)

    # Obtain Log(|L|^2)
    #t1 = time.time()
    logdet = 2 * sum(cvxopt.log(
        cholmod.diag(F)))  # this method only works for symm decomps
    # Need to do tr(R_X)^2 for rml
    #t2 = time.time()
    #print(t2-t1)

    # Obtain log likelihood
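    # Note: X is not an argument of PLS, so it must be defined in the
    # enclosing scope; X.size[0] appears to play the same role as n in the
    # 2D variants above.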
    logllh = -logdet / 2 - X.size[0] / 2 * (1 + np.log(2 * np.pi * pss) -
                                            np.log(X.size[0]))

    #print(L[::(L.size[0]+1)]) # gives diag
    #print(logllh[0,0])
    #print(theta)

    return (-logllh[0, 0])
Example #7
File: PeLS.py, Project: TomMaullin/BLMM
def PeLS2D(theta, ZtX, ZtY, XtX, ZtZ, XtY, YtX, YtZ, XtZ, YtY, n, P, I, tinds,
           rinds, cinds):

    # Obtain Lambda
    Lambda = mapping2D(theta, tinds, rinds, cinds)

    # Obtain Lambda'
    Lambdat = spmatrix.trans(Lambda)

    # Obtain Lambda'Z'Y and Lambda'Z'X
    LambdatZtY = Lambdat * ZtY
    LambdatZtX = Lambdat * ZtX

    # Obtain the cholesky decomposition
    LambdatZtZLambda = Lambdat * (ZtZ * Lambda)
    chol_dict = sparse_chol2D(LambdatZtZLambda + I,
                              perm=P,
                              retF=True,
                              retP=False,
                              retL=False)
    F = chol_dict['F']

    # Obtain C_u (annoyingly solve writes over the second argument,
    # whereas spsolve outputs)
    Cu = LambdatZtY[P, :]
    cholmod.solve(F, Cu, sys=4)

    # Obtain RZX
    RZX = LambdatZtX[P, :]
    cholmod.solve(F, RZX, sys=4)

    # Obtain RXtRX
    RXtRX = XtX - matrix.trans(RZX) * RZX

    # Obtain beta estimates (note: gesv also replaces the second
    # argument)
    betahat = XtY - matrix.trans(RZX) * Cu
    try:
        lapack.posv(RXtRX, betahat)
    except:
        lapack.gesv(RXtRX, betahat)

    # Obtain u estimates
    uhat = Cu - RZX * betahat
    cholmod.solve(F, uhat, sys=5)
    cholmod.solve(F, uhat, sys=8)

    # Obtain b estimates
    bhat = Lambda * uhat

    # Obtain residuals sum of squares
    resss = YtY - 2 * YtX * betahat - 2 * YtZ * bhat + 2 * matrix.trans(
        betahat) * XtZ * bhat + matrix.trans(
            betahat) * XtX * betahat + matrix.trans(bhat) * ZtZ * bhat

    # Obtain penalised residual sum of squares
    pss = resss + matrix.trans(uhat) * uhat

    # Obtain Log(|L|^2)
    logdet = 2 * sum(cvxopt.log(cholmod.diag(F)))

    # Obtain log likelihood
    logllh = -logdet / 2 - n / 2 * (1 + np.log(2 * np.pi * pss[0, 0]) -
                                    np.log(n))

    return (-logllh)
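
PeLS2D returns the negated profiled log-likelihood, so it is presumably minimised over theta by the calling code. A hypothetical driver sketch, assuming theta0 and the precomputed matrices (ZtX, ZtY and the rest, plus tinds, rinds, cinds) already exist and that PeLS2D accepts an array-like theta:

from scipy.optimize import minimize

# Hypothetical: theta0 and the precomputed inputs are assumed to be built
# elsewhere exactly as PeLS2D expects them.
result = minimize(PeLS2D, theta0,
                  args=(ZtX, ZtY, XtX, ZtZ, XtY, YtX, YtZ, XtZ, YtY, n, P, I,
                        tinds, rinds, cinds),
                  method='Nelder-Mead')
theta_hat = result.x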