Example #1
def power(A, v, it):
    # Power iteration: estimate the dominant eigenpair of A starting from v.
    u = v
    for i in range(it):
        v = pnp.dot(A, u)
        # Rayleigh-quotient estimate of the eigenvalue, using the MPC-friendly reciprocal.
        l = pnp.dot(pnp.transpose(v), u) * myreciprocal(pnp.dot(pnp.transpose(u), u))
        u = v * myreciprocal(l.flatten()[0])
    return u, l
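For reference, here is a minimal cleartext sketch of the same power iteration in plain NumPy, assuming pnp mirrors the NumPy API and myreciprocal is an MPC-friendly 1/x; it only illustrates the expected numerical behaviour:

import numpy as np

def power_plain(A, v, it):
    # Same iteration with ordinary division instead of myreciprocal.
    u = v
    for _ in range(it):
        v = A @ u
        lam = (v.T @ u) / (u.T @ u)   # Rayleigh-quotient estimate of the eigenvalue
        u = v / lam.flatten()[0]
    return u, lam

A = np.array([[2.0, 1.0], [1.0, 3.0]])
u, lam = power_plain(A, np.ones((2, 1)), 50)   # lam converges to the dominant eigenvalue, about 3.618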
Example #2
def jaccard_sim_po(mat):
    # Pairwise Jaccard similarity |A ∩ B| / |A ∪ B| for the binary rows of mat.
    intersect = pnp.dot(mat, pnp.transpose(mat))
    union = pnp.sum(mat, axis=1)
    union = pnp.reshape(pnp.tile(union, len(mat)), (len(mat), len(mat)))
    union = union + pnp.transpose(union) - intersect + 0.0   # |A| + |B| - |A ∩ B|
    sim = intersect * myreciprocal(union, 0.0)
    # Each row compared with itself has similarity exactly 1.
    for i in range(sim.shape[0]):
        sim[i, i] = pp.sfixed(1.0)
    return sim
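A cleartext sanity check of the same Jaccard formula, assuming pnp behaves like NumPy on binary matrices; the diagonal is forced to 1 exactly as above:

import numpy as np

def jaccard_sim_plain(mat):
    # |A ∩ B| / (|A| + |B| - |A ∩ B|) for every pair of binary rows.
    intersect = mat @ mat.T
    row_sums = mat.sum(axis=1)
    union = row_sums[:, None] + row_sums[None, :] - intersect
    sim = intersect / np.maximum(union, 1e-12)   # guard against empty rows
    np.fill_diagonal(sim, 1.0)
    return sim

mat = np.array([[1, 1, 0], [1, 0, 1], [0, 0, 1]], dtype=float)
print(jaccard_sim_plain(mat))   # e.g. sim[0, 1] = 1 / 3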
Example #3
def backward_dropout(network, expected):
    # Backward pass for a network trained with dropout: fill in delta, gradient
    # and bias_gradient for every layer, starting from the output layer.
    n_layer = len(network)
    for i in reversed(range(n_layer)):
        layer = network[i]
        if i == n_layer - 1:  # output layer is linear, so outputs_before_act == outputs
            exp_size = pnp.ravel(expected).shape[0]
            # Mean-squared-error delta at the output layer.
            layer['delta'] = (layer['outputs_before_act'] - expected) / (exp_size + 0.0)
            layer['gradient'] = pnp.dot(pnp.transpose(layer['inputs']), layer['delta'])
            layer['bias_gradient'] = pnp.dot(pnp.ones((1, layer['inputs'].shape[0])), layer['delta'])
        else:
            nrow = layer['outputs_before_act'].shape[0]
            next_layer = network[i + 1]
            # Backpropagate through the next layer's weights, the ReLU, and the dropout mask.
            layer['delta'] = (pnp.dot(next_layer['delta'], pnp.transpose(next_layer['weight']))
                              * relu_derivative(layer['outputs_before_act'])
                              * pnp.dot(pnp.ones((nrow, 1)), layer['mask']))
            layer['gradient'] = pnp.dot(pnp.transpose(layer['inputs']), layer['delta'])
            layer['bias_gradient'] = pnp.dot(pnp.ones((1, layer['inputs'].shape[0])), layer['delta'])
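relu_derivative is not shown in this example; under the usual definition it is just the indicator of positive pre-activations. A plain-NumPy stand-in (an assumption, not taken from the original code) would be:

import numpy as np

def relu_derivative(x):
    # Derivative of max(0, x): 1 where x > 0, else 0.
    return (x > 0).astype(x.dtype)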
Example #4
def grad_full(X, W, H, Y, l):
    '''
    The symbols in this function follow the notation of the paper
    "Large-scale Multi-label Learning with Missing Labels".

    X: real matrix: 708 * (350 or lower) (private) / 1512 * (800 or lower) (public)
    W: real matrix: (350/800 or lower) * 125 (private)
    H: real matrix: 708/1512 * 125 (private)
    Y: bin  matrix: 708 * 1512 (private, dense)
    l: (lamb) 1

    A = X * W * H^T
    D = A - Y
    ans = X^T * D * H + l * W
    '''
    # Expand X^T * (X*W*H^T - Y) * H + l*W without forming the dense matrix D.
    ans = multi_dot([pnp.transpose(X), X, W, pnp.transpose(H), H]) - multi_dot([pnp.transpose(X), Y, H]) + l * W
    return ans
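Since the gradient is affine in W, the docstring formula can be checked against the factored expression on small cleartext matrices (assuming multi_dot chains matrix products and pnp mirrors NumPy):

import numpy as np

rng = np.random.default_rng(0)
X, W, H = rng.normal(size=(6, 4)), rng.normal(size=(4, 3)), rng.normal(size=(5, 3))
Y = rng.integers(0, 2, size=(6, 5)).astype(float)
l = 0.1

D = X @ W @ H.T - Y                   # D = A - Y with A = X * W * H^T
grad = X.T @ D @ H + l * W            # docstring formula plus the ridge term
grad_factored = X.T @ X @ W @ H.T @ H - X.T @ Y @ H + l * W   # form used in the code
assert np.allclose(grad, grad_factored)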
Example #5
def dice_sim_matrix_po(X):
    # Pairwise Dice similarity 2|A ∩ B| / (|A| + |B|) for the binary rows of X.
    sumX = pnp.sum(X, axis=1)
    sumX = pnp.reshape(pnp.tile(sumX, len(X)), (len(X), len(X)))
    sumX = sumX + pnp.transpose(sumX)   # |A| + |B| for every pair of rows
    cmpX = pnp.zeros((len(X), len(X)))
    for i in range(len(X)):
        # Intersection sizes |A ∩ B| between row i and every other row.
        cmpX[i] = pnp.sum(X * pnp.reshape(pnp.tile(X[i], len(X)), (len(X), len(X[0]))), axis=1)
    # Algebraically this equals 2 * cmpX / sumX.
    result = 2 * cmpX * myreciprocal(sumX * sumX, 0.0)
    result = sumX * result
    return result
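The last two lines amount to 2 * cmpX / sumX, i.e. the Dice coefficient. A cleartext version, assuming NumPy semantics for pnp:

import numpy as np

def dice_sim_plain(X):
    # Dice coefficient 2|A ∩ B| / (|A| + |B|) for every pair of binary rows.
    intersect = X @ X.T
    row_sums = X.sum(axis=1)
    denom = row_sums[:, None] + row_sums[None, :]
    return 2 * intersect / np.maximum(denom, 1e-12)

X = np.array([[1, 1, 0], [1, 0, 1]], dtype=float)
print(dice_sim_plain(X))   # off-diagonal entries are 2 * 1 / 4 = 0.5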
Example #6
def DCA_po(networks, dim, rsp, maxiter, pmiter, log_iter):
    def log_po(x, times):
        # Truncated Maclaurin series of ln(x) around x = 1:
        # ln(x) = sum_{k>=1} (-1)^(k+1) * (x - 1)^k / k.
        tmp = x - 1
        sgn = 1
        result = 0
        for k in range(times):
            result += 1. / (k+1) * sgn * tmp
            tmp *= x - 1
            sgn *= -1
        return result
    
    def power(A, v, it):
        # Power iteration: estimate the dominant eigenpair of A (same helper as in Example #1).
        u = v
        for i in range(it):
            v = pnp.dot(A, u)
            l = pnp.dot(pnp.transpose(v), u) * myreciprocal(pnp.dot(pnp.transpose(u), u))
            u = v * myreciprocal(l.flatten()[0])
        return u, l
    
    def hhmul(v, w):
        # Apply the Householder reflection (I - 2*w*w^T) to v; w is assumed to be unit-norm.
        return v - 2 * pnp.transpose(w).dot(v).flatten()[0] * w
    
    def hhupdate(A, w):
        # Reflect A on both sides, (I - 2*w*w^T) * A * (I - 2*w*w^T), then deflate
        # by dropping the first row and column.
        wA = 2 * pnp.dot(w, pnp.dot(pnp.transpose(w), A))
        wAw = 2 * pnp.dot(pnp.dot(wA, w), pnp.transpose(w))
        A = A - wA - pnp.transpose(wA) + wAw
        return A[1:, 1:]
    
    def pmPCA_po(A, dim, it):
        # The leading dim eigenpairs of A via power iteration plus Householder deflation.
        results = []
        ws = []
        ls = []
        for i in range(dim):
            v = pnp.ones((A.shape[0], 1))
            v, l = power(A, v, it)
            # Prepare a vector w
            w = pnp.zeros(v.shape)
            w[0] = pnp.norm(v)
            w += v
            w = w * myreciprocal(pnp.norm(w))
            # Reduce the matrix dimension
            A = hhupdate(A, w)
            # Reconstruct the eigenvector of original matrix from the current one
            for wp in ws:
                v = pnp.concatenate((pp.farr([[0]]), v))
                v = hhmul(v, wp)
            v = v * myreciprocal(pnp.norm(v))
            results.append(v)
            ws.insert(0, w)
            ls.append(pp.sfixed(l.flatten()[0]))
        return pnp.concatenate(results, axis=1), pp.farr(ls)
    
    # Run random walk with restart (RWR) on every input network and concatenate
    # the resulting diffusion-state matrices column by column.
    P = pp.farr([])
    for net in networks:
        tQ = RWR_po(net, maxiter, rsp)
        if P.shape[0] == 0:
            P = pnp.zeros((tQ.shape[0], 0))
        # concatenate network
        P = pnp.hstack((P, tQ))
    alpha = 0.01
    # Shifted log transform via the truncated series, then the Gram matrix of the profiles.
    P = log_po(P + alpha, log_iter) - pnp.log(alpha)  # 0 < p < ln(n+1)
    P = pnp.dot(P, pnp.transpose(P))  # 0 < p < n * ln^2(n+1)
    vecs, lambdas = pmPCA_po(P, dim, pmiter)
    sigd = pnp.dot(pnp.eye(dim), pnp.diag(lambdas))
    # Scale the eigenvectors by the fourth root of the eigenvalues,
    # zeroing any entry whose eigenvalue is numerically zero.
    sigd_sqsq = pnp.sqrt(pnp.sqrt(sigd))
    flag = pnp.abs(sigd) < 1e-6
    sigd_sqsq = flag * pnp.zeros(sigd.shape) + (1 - flag) * sigd_sqsq
    X = pnp.dot(vecs, sigd_sqsq)
    return X
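log_po above is the Maclaurin series of ln(x) around x = 1 truncated after a fixed number of terms, so it is only accurate for arguments close to 1. A quick cleartext comparison (plain Python and NumPy, nothing beyond the series itself is assumed):

import numpy as np

def log_series(x, times):
    # ln(x) = sum_{k>=1} (-1)^(k+1) * (x - 1)^k / k, truncated after the given number of terms.
    tmp, sgn, result = x - 1.0, 1.0, 0.0
    for k in range(times):
        result += sgn * tmp / (k + 1)
        tmp *= x - 1.0
        sgn *= -1.0
    return result

x = 1.3
print(log_series(x, 20), np.log(x))   # the two values agree to several decimal places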
Example #7
def hess_full(X, W, S, H, Y, l):
    '''
    Only valid under the square loss function.
    '''
    # Hessian-vector product applied to direction S: X^T*X*S*H^T*H + l*S.
    ans = multi_dot([pnp.transpose(X), X, S, pnp.transpose(H), H]) + l * S
    return ans
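Because the square-loss gradient is affine in W, hess_full applied to a direction S equals grad_full(W + S) - grad_full(W) exactly. This can be verified on small cleartext matrices (NumPy as a stand-in for pnp):

import numpy as np

rng = np.random.default_rng(1)
X, W, S, H = (rng.normal(size=s) for s in [(6, 4), (4, 3), (4, 3), (5, 3)])
Y = rng.normal(size=(6, 5))
l = 0.1

grad = lambda W_: X.T @ (X @ W_ @ H.T - Y) @ H + l * W_
hess_S = X.T @ X @ S @ H.T @ H + l * S      # hess_full applied to direction S
assert np.allclose(hess_S, grad(W + S) - grad(W))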
Example #8
def IMC_po(Y, D, P, k, lamb, maxiter, gciter):
    '''
    D: real matrix: 708 * (350 or lower) (private)
    P: real matrix: 1512 * (800 or lower) (public)
    Y: bin  matrix: 708 * 1512 (private)
    '''
     
    def multi_dot(arr):
        # Chain the matrix products right to left: arr[0] . arr[1] . ... . arr[-1].
        ans = arr[-1]
        for ii in range(len(arr) - 2, -1, -1):
            ans = pp.farr(arr[ii]).dot(ans)
        return ans
    
    ## require MPC version <-
    def grad_full(X, W, H, Y, l):
        '''
        The symbols in this function follow the notation of the paper
        "Large-scale Multi-label Learning with Missing Labels".

        X: real matrix: 708 * (350 or lower) (private) / 1512 * (800 or lower) (public)
        W: real matrix: (350/800 or lower) * 125 (private)
        H: real matrix: 708/1512 * 125 (private)
        Y: bin  matrix: 708 * 1512 (private, dense)
        l: (lamb) 1

        A = X * W * H^T
        D = A - Y
        ans = X^T * D * H + l * W
        '''
        # Expand X^T * (X*W*H^T - Y) * H + l*W without forming the dense matrix D.
        ans = multi_dot([pnp.transpose(X), X, W, pnp.transpose(H), H]) - multi_dot([pnp.transpose(X), Y, H]) + l * W
        return ans
    
    def hess_full(X, W, S, H, Y, l):
        '''
        Only valid under the square loss function.
        '''
        # Hessian-vector product applied to direction S: X^T*X*S*H^T*H + l*S.
        ans = multi_dot([pnp.transpose(X), X, S, pnp.transpose(H), H]) + l * S
        return ans
    
    def fdot(A, B):
        # Flattened dot product: treat A and B as long vectors,
        # so the result is sum(A * B) scaled by 1 / (A.shape[0] * B.shape[0]).
        A = pp.farr(A)
        B = pp.farr(B)
        A = A * (1.0 / A.shape[0])
        B = B * (1.0 / B.shape[0])
        return pnp.sum(A * B)
    
    def GC(X, W, H, Y, l, iters):
        # Conjugate-gradient solver for the regularized least-squares subproblem in W.
        grad_solver = lambda W: grad_full(X, W, H, Y, l)
        hess_solver = lambda W, S: hess_full(X, W, S, H, Y, l)
        R = - grad_solver(W) + 0.0  # residual = negative gradient
        D = R                       # initial search direction
        oldR2 = fdot(R, R)
        for t in range(iters):
            hessD = hess_solver(W, D)
            a = oldR2 * pp.reciprocal(fdot(D, hessD) + 1e-8)  # step size along D
            W += a * D
            R -= a * hessD
            newR2 = fdot(R, R)
            b = newR2 * pp.reciprocal(oldR2 + 1e-8)           # Fletcher-Reeves coefficient
            D = R + b * D
            oldR2 = newR2
        return W
    
    # Low-rank factors, initialized as scaled identity-like matrices.
    W = pnp.eye(D.shape[1], k) * 0.3
    H = pnp.eye(P.shape[1], k) * 0.3

    # Alternating minimization: fix H and solve for W, then fix W and solve for H.
    updateW = lambda W, H, it: GC(D, W, pnp.dot(P, H), Y, lamb, it)
    updateH = lambda W, H, it: GC(P, H, pnp.dot(D, W), pnp.transpose(Y), lamb, it)
    for i in range(maxiter):
        W = updateW(W, H, gciter)
        H = updateH(W, H, gciter)

        if True:  # log: reconstruction error, computed only for monitoring
            Yhat = multi_dot([D, W, pnp.transpose(H), pnp.transpose(P)])
            loss = pnp.norm(Y - Yhat)
    Yhat = multi_dot((D, W, pnp.transpose(H), pnp.transpose(P)))
    return Yhat
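The GC helper above follows the textbook conjugate-gradient recurrence, just expressed with matrix-shaped iterates and fixed-point reciprocals. The same structure on an ordinary symmetric positive-definite system, purely as a plain-NumPy illustration:

import numpy as np

def conjugate_gradient(A, b, iters):
    # Minimize 0.5 * x^T A x - b^T x for symmetric positive-definite A.
    x = np.zeros_like(b)
    r = b - A @ x              # residual, i.e. the negative gradient
    d = r
    old_r2 = r @ r
    for _ in range(iters):
        Ad = A @ d
        a = old_r2 / (d @ Ad + 1e-12)      # step size along the search direction
        x += a * d
        r -= a * Ad
        new_r2 = r @ r
        d = r + (new_r2 / (old_r2 + 1e-12)) * d    # Fletcher-Reeves style update
        old_r2 = new_r2
    return x

A = np.array([[4.0, 1.0], [1.0, 3.0]])
b = np.array([1.0, 2.0])
print(conjugate_gradient(A, b, 2), np.linalg.solve(A, b))   # CG solves a 2x2 SPD system in 2 steps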
Example #9
def hhupdate(A, w):
    # Reflect A on both sides, (I - 2*w*w^T) * A * (I - 2*w*w^T), then deflate
    # by dropping the first row and column.
    wA = 2 * pnp.dot(w, pnp.dot(pnp.transpose(w), A))
    wAw = 2 * pnp.dot(pnp.dot(wA, w), pnp.transpose(w))
    A = A - wA - pnp.transpose(wA) + wAw
    return A[1:, 1:]
Example #10
def hhmul(v, w):
    # Apply the Householder reflection (I - 2*w*w^T) to v; w is assumed to be unit-norm.
    return v - 2 * pnp.transpose(w).dot(v).flatten()[0] * w
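With a unit vector w, hhmul applies the Householder reflection (I - 2*w*w^T) to v. A small cleartext check of the same expression, assuming pnp mirrors NumPy and building w exactly as pmPCA_po does in Example #6:

import numpy as np

v = np.array([[3.0], [4.0]])
w = np.zeros(v.shape)
w[0] = np.linalg.norm(v)       # same construction as in pmPCA_po
w += v
w /= np.linalg.norm(w)

reflected = v - 2 * (w.T @ v).flatten()[0] * w   # the hhmul expression
print(reflected)   # approximately [[-5.], [0.]]: the norm is preserved and the tail is zeroed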