Example #1
def power(A, v, it):
    # Power iteration: repeatedly multiply by A to converge on the dominant eigenvector.
    u = v
    for i in range(it):
        v = pnp.dot(A, u)
        # Rayleigh-quotient-style eigenvalue estimate: l = (v . u) / (u . u).
        l = pnp.dot(pnp.transpose(v), u) * myreciprocal(pnp.dot(pnp.transpose(u), u))
        # Rescale by 1/l so u neither blows up nor vanishes across iterations.
        u = v * myreciprocal(l.flatten()[0])
    return u, l
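For reference, here is a minimal plain-NumPy sketch of the same power-iteration loop (no MPC types; ordinary division stands in for myreciprocal, and the function name and test matrix are illustrative only):

import numpy as np

def power_plain(A, v, it):
    # Plain analogue of power(): repeated multiplication by A with the
    # Rayleigh-quotient-style estimate l = (v . u) / (u . u).
    u = v
    for _ in range(it):
        v = A @ u
        l = (v.T @ u) / (u.T @ u)
        u = v / l.flatten()[0]
    return u, l

A = np.array([[2.0, 1.0], [1.0, 3.0]])
u, l = power_plain(A, np.ones((2, 1)), 50)
print(l.flatten()[0])   # approx. 3.618, the dominant eigenvalue of A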
Example #2
def predict(network, inputs, hypers):
    # Batched forward pass through `network`; hypers[6] holds the batch size.
    n_layer = len(network)
    batch_size = hypers[6]
    # Only full batches start below this bound; leftover rows are handled after the loop.
    up = inputs.shape[0] - batch_size + 1
    for s in range(0, up, batch_size):
        temp = pp.farr(inputs[s: s + batch_size, :])
        for i in range(n_layer):
            layer = network[i]
            temp = pnp.dot(temp, layer['weight']) + pnp.dot(pnp.ones((temp.shape[0], 1)), layer['bias'])
            if layer['activation'] == 'relu':
                temp = relu(temp)
            else: # 'linear'
                pass
        if s == 0:
            pred = temp
        else:
            pred = pnp.concatenate((pred, temp), axis = 0)
    # Run the leftover rows (or all rows, when inputs hold fewer rows than one batch).
    if up <= 0 or pred.shape[0] < inputs.shape[0]:
        if up <= 0:
            temp = inputs
        else:
            temp = inputs[pred.shape[0]:, :]
        for i in range(n_layer):
            layer = network[i]
            temp = pnp.dot(temp, layer['weight']) + pnp.dot(pnp.ones((temp.shape[0], 1)), layer['bias'])
            if layer['activation'] == 'relu':
                temp = relu(temp)
            else: # 'linear'
                pass
        if up <= 0:
            pred = temp
        else:
            pred = pnp.concatenate((pred, temp), axis = 0)
    return pred
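The batching above takes full batches of hypers[6] rows first and then pushes the remaining tail rows (or everything, when the input is smaller than one batch) through the same layer loop. A small stand-alone sketch of just that slicing, with hypothetical sizes, shows which row ranges each pass covers:

def batch_slices(n_rows, batch_size):
    # Mirrors predict()'s slicing: full batches first, then any leftover tail rows.
    up = n_rows - batch_size + 1
    starts = list(range(0, up, batch_size))
    covered = starts[-1] + batch_size if starts else 0
    tail = (covered, n_rows) if covered < n_rows else None
    return [(s, s + batch_size) for s in starts], tail

print(batch_slices(10, 4))   # ([(0, 4), (4, 8)], (8, 10))
print(batch_slices(3, 4))    # ([], (0, 3)) -- the up <= 0 branch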
Example #3
def backward_dropout(network, expected):
    # Backpropagation for a network trained with dropout (masks cached by forward_dropout).
    n_layer = len(network)
    for i in reversed(range(n_layer)):
        layer = network[i]
        if i == n_layer - 1:  # output layer is linear, so outputs_before_act == outputs
            exp_size = pnp.ravel(expected).shape[0]
            # Squared-error delta, averaged over the number of target entries.
            layer['delta'] = (layer['outputs_before_act'] - expected) / (exp_size + 0.0)
            layer['gradient'] = pnp.dot(pnp.transpose(layer['inputs']), layer['delta'])
            layer['bias_gradient'] = pnp.dot(pnp.ones((1, layer['inputs'].shape[0])), layer['delta'])
        else:
            nrow = layer['outputs_before_act'].shape[0]
            next_layer = network[i + 1]
            # Chain rule: propagate the next layer's delta back through its weights,
            # apply the ReLU derivative, and re-apply the dropout mask from the forward pass.
            layer['delta'] = pnp.dot(next_layer['delta'], pnp.transpose(next_layer['weight'])) * relu_derivative(layer['outputs_before_act']) * pnp.dot(pnp.ones((nrow, 1)), layer['mask'])
            layer['gradient'] = pnp.dot(pnp.transpose(layer['inputs']), layer['delta'])
            layer['bias_gradient'] = pnp.dot(pnp.ones((1, layer['inputs'].shape[0])), layer['delta'])
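relu and relu_derivative are used by these examples but not shown; the usual elementwise definitions (an assumption here, written in plain NumPy rather than in the MPC types used above) are:

import numpy as np

def relu(x):
    # Elementwise max(0, x).
    return np.maximum(x, 0)

def relu_derivative(x):
    # 1 where the pre-activation is positive, 0 elsewhere.
    return (x > 0).astype(x.dtype)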
Example #4
def forward_dropout(network, inputs):
    # Forward pass with (inverted) dropout; caches per-layer tensors for backward_dropout.
    n_layer = len(network)
    for i in range(n_layer):
        layer = network[i]
        outputs_before_act = pnp.dot(inputs, layer['weight']) + pnp.dot(pnp.ones((inputs.shape[0], 1)), layer['bias'])
        layer['inputs'] = inputs
        layer['outputs_before_act'] = outputs_before_act
        if layer['activation'] == 'relu':
            outputs_after_act = relu(outputs_before_act)
            nrow = outputs_after_act.shape[0]
            ncol = outputs_after_act.shape[1]
            # Inverted-dropout mask: drop with probability p, scale survivors by 1 / (1 - p).
            # Saved so backward_dropout can re-apply it.
            mask = (np.random.rand(1, ncol) > layer['p']) / (1 - layer['p'])
            layer['mask'] = mask
            outputs_after_act *= np.dot(np.ones((nrow, 1)), mask)
        else:  # 'linear': identity activation
            outputs_after_act = outputs_before_act
        inputs = outputs_after_act
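The mask (rand > p) / (1 - p) is inverted dropout: each unit is kept with probability 1 - p and the survivors are scaled up by 1 / (1 - p), so the expected activation is unchanged and no rescaling is needed at prediction time. A quick plain-NumPy check of that expectation (illustrative values only):

import numpy as np

rng = np.random.default_rng(0)
p = 0.5
x = np.ones((1, 100000))
mask = (rng.random(x.shape) > p) / (1 - p)
print((x * mask).mean())   # close to 1.0: the expected activation is preserved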
Example #5
def jaccard_sim_po(mat):
    # Pairwise Jaccard similarity between the binary rows of `mat`.
    # intersect[i, j] = |row_i AND row_j|; union = |row_i| + |row_j| - intersect.
    intersect = pnp.dot(mat, pnp.transpose(mat))
    union = pnp.sum(mat, axis=1)
    union = pnp.reshape(pnp.tile(union, len(mat)), (len(mat), len(mat)))
    union = union + pnp.transpose(union) - intersect + 0.0
    sim = intersect * myreciprocal(union, 0.0)
    # Force the diagonal (self-similarity) to exactly 1.
    for i in range(sim.shape[0]):
        sim[i, i] = pp.sfixed(1.0)
    return sim
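Because the rows of mat are binary, mat @ mat.T counts the elementwise intersections and the row sums give the set sizes, so union = |A| + |B| - |A ∩ B|. A plain-NumPy reference of the same formula (helper name and test data are illustrative):

import numpy as np

def jaccard_plain(mat):
    # Reference for jaccard_sim_po: |A ∩ B| / (|A| + |B| - |A ∩ B|) over binary rows.
    intersect = mat @ mat.T
    sizes = mat.sum(axis=1)
    union = sizes[:, None] + sizes[None, :] - intersect
    with np.errstate(divide='ignore', invalid='ignore'):
        sim = np.where(union > 0, intersect / union, 0.0)
    np.fill_diagonal(sim, 1.0)
    return sim

mat = np.array([[1., 1., 0.], [1., 0., 1.], [0., 0., 1.]])
print(jaccard_plain(mat))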
Example #6
def RWR_po(A, maxiter, restartProb):
    n = len(A)
    # Column-normalize the adjacency matrix so each column of P sums to 1.
    A = A + 0.0  # force a real-valued (non-integer) matrix
    tmp_var = pnp.sum(A,axis=0)
    tmp_var = myreciprocal(tmp_var,0.0)
    tmp_var = pnp.tile(tmp_var,(A.shape[0],1))
    P = A * tmp_var
    # Personalized PageRank / random walk with restart:
    # iterate Q <- (1 - restartProb) * P @ Q + restartProb * I.
    restart = pnp.eye(n) * restartProb
    Q = pnp.eye(n)
    for i in range(maxiter):
        Q = (1 - restartProb) * pnp.dot(P, Q) + restart
    return Q
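RWR_po column-normalizes the adjacency matrix and then iterates the random-walk-with-restart recurrence, so each column of Q converges to the visiting distribution of walks that restart at that node. A plain-NumPy sketch on a small hypothetical graph (names and data are illustrative):

import numpy as np

def rwr_plain(A, maxiter, restart_prob):
    # Plain analogue of RWR_po: column-normalize A, then iterate
    # Q <- (1 - r) * P @ Q + r * I.
    n = len(A)
    P = A / A.sum(axis=0)
    Q = np.eye(n)
    restart = np.eye(n) * restart_prob
    for _ in range(maxiter):
        Q = (1 - restart_prob) * (P @ Q) + restart
    return Q

A = np.array([[0., 1., 1.], [1., 0., 1.], [1., 1., 0.]])
print(rwr_plain(A, 50, 0.5).round(3))   # each column sums to 1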
Example #7
def scale(x, max_abs):
    # Divide every row of x by the row vector max_abs, i.e. x / (ones @ max_abs),
    # processing the rows in fixed-size batches.
    #x = x / np.dot(np.ones((x.shape[0], 1)), max_abs)
    x_ones = pnp.ones((x.shape[0], 1))
    sca = pnp.dot(x_ones, max_abs)
    tar_shape = x.shape
    #x = x * pnp.reciprocal(sca)
    batch_size = 20
    tmp_x = [(x[i:i+batch_size, :] * pp.reciprocal(sca[i:i+batch_size, :])) for i in range(0, x.shape[0], batch_size)]

    if len(tmp_x) == 1:
        x = pp.farr(tmp_x)
        x = pnp.reshape(x, tar_shape)
    else:
        x = pnp.vstack(tmp_x)
        x = pnp.reshape(x, tar_shape)

    return x
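Ignoring the batching, scale() is an elementwise division of x by max_abs broadcast over the rows, and inverse_scale() in the next example multiplies it back. A one-line plain-NumPy equivalent, with hypothetical data:

import numpy as np

x = np.array([[1.0, 4.0], [2.0, 8.0]])
max_abs = np.array([[2.0, 4.0]])
print(x / (np.ones((x.shape[0], 1)) @ max_abs))   # same result scale() builds batch by batch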
Example #8
def inverse_scale(x, max_abs):
    # Undo scale(): multiply each row of x back by max_abs.
    x_ones = pnp.ones((x.shape[0], 1))
    x = x * pnp.dot(x_ones, max_abs)
    return x
Example #9
def DCA_po(networks, dim, rsp, maxiter, pmiter, log_iter):
    def log_po(x, times):
        # Taylor expansion of ln(x) around 1: sum_{k>=1} (-1)^(k+1) (x - 1)^k / k,
        # truncated after `times` terms (log is not available as a cheap primitive here).
        tmp = x - 1
        sgn = 1
        result = 0
        for k in range(times):
            result += 1. / (k+1) * sgn * tmp
            tmp *= x - 1
            sgn *= -1
        return result
    
    def power(A, v, it):
        # Power iteration for the dominant eigenpair (same helper as Example #1).
        u = v
        for i in range(it):
            v = pnp.dot(A, u)
            l = pnp.dot(pnp.transpose(v), u) * myreciprocal(pnp.dot(pnp.transpose(u), u))
            u = v * myreciprocal(l.flatten()[0])
        return u, l
    
    def hhmul(v, w):
        # Apply the Householder reflection (I - 2 w w^T) to v.
        return v - 2 * pnp.transpose(w).dot(v).flatten()[0] * w
    
    def hhupdate(A, w):
        # Two-sided Householder update (I - 2ww^T) A (I - 2ww^T), then drop the first
        # row and column to deflate out the eigenvector just extracted.
        wA = 2 * pnp.dot(w, pnp.dot(pnp.transpose(w), A))
        wAw = 2 * pnp.dot(pnp.dot(wA, w), pnp.transpose(w))
        A = A - wA - pnp.transpose(wA) + wAw
        return A[1:, 1:]
    
    def pmPCA_po(A, dim, it):
        # Power-method PCA: extract the `dim` leading eigenpairs of symmetric A
        # via power iteration plus Householder deflation.
        results = []
        ws = []
        ls = []
        for i in range(dim):
            v = pnp.ones((A.shape[0], 1))
            v, l = power(A, v, it)
            # Prepare a vector w
            w = pnp.zeros(v.shape)
            w[0] = pnp.norm(v)
            w += v
            w = w * myreciprocal(pnp.norm(w))
            # Reduce the matrix dimension
            A = hhupdate(A, w)
            # Reconstruct the eigenvector of original matrix from the current one
            for wp in ws:
                v = pnp.concatenate((pp.farr([[0]]), v))
                v = hhmul(v, wp)
            v = v * myreciprocal(pnp.norm(v))
            results.append(v)
            ws.insert(0, w)
            ls.append(pp.sfixed(l.flatten()[0]))
        return pnp.concatenate(results, axis=1), pp.farr(ls)
    
    # Run RWR on each input network and concatenate the diffusion profiles column-wise.
    P = pp.farr([])
    for net in networks:
        tQ = RWR_po(net, maxiter, rsp)
        if P.shape[0] == 0:
            P = pnp.zeros((tQ.shape[0], 0))
        # concatenate network
        P = pnp.hstack((P, tQ))
    # Add a small alpha so the log stays defined at zero entries.
    alpha = 0.01
    P = log_po(P + alpha, log_iter) - pnp.log(alpha)  # 0 < p < ln(n+1)
    P = pnp.dot(P, pnp.transpose(P))  # 0 < p < n * ln^2(n+1)
    vecs, lambdas = pmPCA_po(P, dim, pmiter)
    # Scale the eigenvectors by the fourth root of the eigenvalues, zeroing entries
    # whose magnitude is below 1e-6 so near-zero eigenvalues do not inject noise.
    sigd = pnp.dot(pnp.eye(dim), pnp.diag(lambdas))
    sigd_sqsq = pnp.sqrt(pnp.sqrt(sigd))
    flag = pnp.abs(sigd) < 1e-6
    sigd_sqsq = flag * pnp.zeros(sigd.shape) + (1 - flag) * sigd_sqsq
    X = pnp.dot(vecs, sigd_sqsq)
    return X
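The nested log_po helper approximates ln(x) with the truncated alternating Taylor series around 1, which converges for 0 < x < 2. A plain-NumPy check of the same series against np.log (function name and test values are illustrative):

import numpy as np

def log_taylor(x, times):
    # Same series as log_po: ln(x) ~ sum_{k=1..times} (-1)^(k+1) (x - 1)^k / k.
    tmp, sgn, result = x - 1, 1, 0.0
    for k in range(times):
        result += 1.0 / (k + 1) * sgn * tmp
        tmp *= x - 1
        sgn *= -1
    return result

x = np.array([0.5, 1.0, 1.5])
print(log_taylor(x, 30))
print(np.log(x))            # the two agree to several decimals for x near 1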
Example #10
def IMC_po(Y, D, P, k, lamb, maxiter, gciter):
    '''
    Inductive matrix completion: fit Y ~ D * W * H^T * P^T with rank-k factors W, H
    and ridge penalty lamb, solving each factor by conjugate gradient (GC).

    D: real matrix: 708 * (350 or lower) (private)
    P: real matrix: 1512 * (800 or lower) (public)
    Y: bin  matrix: 708 * 1512 (private)
    '''
     
    def multi_dot(arr):
        # Chain matrix product evaluated right-to-left: arr[0] @ arr[1] @ ... @ arr[-1].
        ans = arr[-1]
        for ii in range(len(arr) - 2, -1, -1):
            ans = pp.farr(arr[ii]).dot(ans)
        return ans
    
    ## require MPC version <-
    def grad_full(X, W, H, Y, l):
        '''
        The symbol in this function is consistent with the symbol in paper Large-scale Multi-label Learning with Missing Labels
    
        X: real matrix: 708 * (350 or lower) (private)/ 1512 * (800 or lower) (public)
        W: real matrix: (350/800 or lower) * 125 (private)
        H: real matrix: 708/1512 * 125 (private)
        Y: bin  matrix: 708 * 1512 (private, dense)
        l: (lamb) 1
    
        A = X * W * H^T
        D = A - Y
        ans = X^T * D * H + l * W
        '''
        ans = multi_dot([pnp.transpose(X), X, W, pnp.transpose(H), H]) - multi_dot([pnp.transpose(X), Y, H]) + l * W
        return ans
    
    def hess_full(X, W, S, H, Y, l):
        '''
        Only works under square loss function
        '''
        ans = multi_dot([pnp.transpose(X), X, S, pnp.transpose(H), H]) + l * S
        return ans
    
    def fdot(A, B):
        # Flattened dot product: treat A and B as long vectors.
        # Each operand is scaled down first, which keeps the summed magnitudes small
        # for the fixed-point arithmetic.
        A = pp.farr(A)
        B = pp.farr(B)
        A = A * (1.0 / A.shape[0])
        B = B * (1.0 / B.shape[0])
        return pnp.sum(A * B)
    
    def GC(X, W, H, Y, l, iters):
        # Linear conjugate gradient on the regularized least-squares subproblem for W
        # (H held fixed); grad_solver / hess_solver supply the gradient and Hessian product.
        grad_solver = lambda W: grad_full(X, W, H, Y, l)
        hess_solver = lambda W, S: hess_full(X, W, S, H, Y, l)
        R = -grad_solver(W) + 0.0
        D = R
        oldR2 = fdot(R, R)
        for t in range(iters):
            hessD = hess_solver(W, D)
            a = oldR2 * pp.reciprocal(fdot(D, hessD) + 1e-8)
            W += a * D
            R -= a * hessD
            newR2 = fdot(R, R)
            b = newR2 * pp.reciprocal(oldR2 + 1e-8)
            D = R + b * D
            oldR2 = newR2
        return W
    
    # Initialize the two low-rank factors.
    W = pnp.eye(D.shape[1], k) * 0.3
    H = pnp.eye(P.shape[1], k) * 0.3

    # Alternating minimization: fix H to update W, then fix W to update H,
    # each via a few conjugate-gradient steps.
    updateW = lambda W, H, it: GC(D, W, pnp.dot(P, H), Y, lamb, it)
    updateH = lambda W, H, it: GC(P, H, pnp.dot(D, W), pnp.transpose(Y), lamb, it)
    for i in range(maxiter):
        W = updateW(W, H, gciter)
        H = updateH(W, H, gciter)

        if True:  # log the reconstruction error for this outer iteration
            Yhat = multi_dot([D, W, pnp.transpose(H), pnp.transpose(P)])
            loss = pnp.norm(Y - Yhat)
    Yhat = multi_dot((D, W, pnp.transpose(H), pnp.transpose(P)))
    return Yhat
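GC above is linear conjugate gradient specialized to the IMC subproblem, with grad_full and hess_full supplying the gradient and Hessian product of the regularized squared loss. The textbook CG update for a generic symmetric positive-definite system A x = b follows the same residual/direction pattern; a minimal plain-NumPy sketch (names and test system are illustrative):

import numpy as np

def cg(A, b, iters):
    # Textbook conjugate gradient for SPD A x = b; the alpha/beta updates
    # mirror GC()'s a and b coefficients.
    x = np.zeros_like(b)
    r = b - A @ x
    d = r.copy()
    old_r2 = r @ r
    for _ in range(iters):
        Ad = A @ d
        alpha = old_r2 / (d @ Ad)
        x += alpha * d
        r -= alpha * Ad
        new_r2 = r @ r
        d = r + (new_r2 / old_r2) * d
        old_r2 = new_r2
    return x

A = np.array([[4.0, 1.0], [1.0, 3.0]])
b = np.array([1.0, 2.0])
print(cg(A, b, 2))   # ~[0.0909, 0.6364], exact for this 2x2 system after 2 steps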
Example #11
def hhupdate(A, w):
    # Two-sided Householder update (I - 2ww^T) A (I - 2ww^T), then drop the first
    # row and column to deflate out the eigenvector just extracted.
    wA = 2 * pnp.dot(w, pnp.dot(pnp.transpose(w), A))
    wAw = 2 * pnp.dot(pnp.dot(wA, w), pnp.transpose(w))
    A = A - wA - pnp.transpose(wA) + wAw
    return A[1:, 1:]
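As a sanity check on the deflation: with w built from an eigenvector v as in pmPCA_po (w = v + ||v|| e1, normalized), the reflected matrix becomes block-diagonal, so dropping the first row and column leaves exactly the remaining eigenvalues. A plain-NumPy verification on a small example (names and data are illustrative):

import numpy as np

def hhupdate_plain(A, w):
    # Same algebra as hhupdate: (I - 2ww^T) A (I - 2ww^T), then deflate.
    wA = 2 * w @ (w.T @ A)
    wAw = 2 * (wA @ w) @ w.T
    A = A - wA - wA.T + wAw
    return A[1:, 1:]

A = np.array([[2.0, 1.0], [1.0, 3.0]])
v = np.linalg.eigh(A)[1][:, [-1]]     # dominant eigenvector (unit norm)
w = v.copy()
w[0] += np.linalg.norm(v)
w /= np.linalg.norm(w)
print(hhupdate_plain(A, w))           # 1x1 block holding the other eigenvalue, ~1.382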