def disabled_test_factorization_sparse():
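    # Fit HOOI to a random sparse tensor and to its dense copy; the two runs
    # should agree. Assumed imports (as in scikit-tensor's own tests):
    # from scipy.sparse import rand as sprand; from numpy import allclose;
    # from sktensor import unfolded_sptensor, unfolded_dtensor, tucker.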
    I, J, K, rank = 10, 20, 75, 5
    Tmat = sprand(I, J * K, 0.1).tocoo()
    T = unfolded_sptensor((Tmat.data, (Tmat.row, Tmat.col)), None, 0, [], (I, J, K)).fold()
    core, U = tucker.hooi(T, rank, maxIter=20)

    Tmat = Tmat.toarray()
    T = unfolded_dtensor(Tmat, 0, (I, J, K)).fold()
    core2, U2 = tucker.hooi(T, rank, maxIter=20)

    assert allclose(core2, core)
    for i in range(len(U)):
        assert allclose(U2[i], U[i])
Example #2
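Tucker-2 factors for a PyTorch convolution: HOOI decomposes the weight tensor along the output- and input-channel modes, the spatial-mode factor is folded back into the core, and the results are reshaped into 1x1 / KxK / 1x1 convolution kernels.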
    def get_tucker_factors(self):
        if self.pretrained is not None:
            raise NotImplementedError('Not implemented')
        else:
            weights = dtensor(self.weight.cpu())
            if self.bias is not None:
                bias = self.bias.cpu()
            else:
                bias = self.bias
            
            core, (U_cout, U_cin, U_dd) = tucker.hooi(weights,
                                                      [self.ranks[0],
                                                       self.ranks[1],
                                                       weights.shape[-1]], init='nvecs')
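            # fold the spatial-mode factor U_dd back into the core (Tucker-2)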
            core = core.dot(U_dd.T)

            w_cin = np.array(U_cin)
            w_core = np.array(core)
            w_cout = np.array(U_cout)
            
            if isinstance(self.layer, nn.Sequential):
                w_cin_old = self.layer[0].weight.cpu().data
                w_cout_old = self.layer[2].weight.cpu().data

                U_cin_old = np.array(torch.transpose(w_cin_old.reshape(w_cin_old.shape[:2]), 1, 0))
                U_cout_old = np.array(w_cout_old.reshape(w_cout_old.shape[:2]))
                
                w_cin = U_cin_old.dot(U_cin)
                w_cout = U_cout_old.dot(U_cout)

        w_cin = torch.FloatTensor(np.reshape(w_cin.T, [self.ranks[1], self.cin, 1, 1])).contiguous()
        w_core = torch.FloatTensor(np.reshape(w_core, [self.ranks[0], self.ranks[1], *self.kernel_size])).contiguous()
        w_cout = torch.FloatTensor(np.reshape(w_cout, [self.cout, self.ranks[0], 1, 1])).contiguous()

        return [w_cin, w_core, w_cout], [None, None, bias]
Example #3
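Decomposes each filter of a pickled 4-D weight tensor into rank-1 factors (one per mode) and pickles the stacked channel, column, and row parameters to separate files.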
def main(args):
    name = os.path.basename(args.tensor)
    name = name.split('.')[0]
    with open(args.tensor, 'rb') as f:
        data = pickle.load(f).T
        filters, channels, cols, rows = data.shape
        channel_data = []
        col_data = []
        row_data = []
        for d in data:
            core, U = tucker.hooi(dtensor(d.T), [1, 1, 1], init='nvecs')
            core = np.squeeze(core)
            channel_data.append((core * U[2]).reshape(1, 1,
                                                      channels))  # channels
            col_data.append(U[1].reshape(1, cols))  # cols
            row_data.append(U[0].reshape(rows, 1))  # rows

        channel_params = np.stack(channel_data, axis=-1)
        print(channel_params.shape)
        col_params = np.expand_dims(np.stack(col_data, axis=-1), axis=-1)
        print(col_params.shape)
        row_params = np.expand_dims(np.stack(row_data, axis=-1), axis=-1)
        print(row_params.shape)

    path = os.path.join(args.dest_dir, name + '_d.params')
    with open(path, 'wb') as f:  # pickle requires binary mode
        pickle.dump(channel_params, f)

    path = os.path.join(args.dest_dir, name + '_h.params')
    with open(path, 'wb') as f:
        pickle.dump(col_params, f)

    path = os.path.join(args.dest_dir, name + '_v.params')
    with open(path, 'wb') as f:
        pickle.dump(row_params, f)
Example #4
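Greedy block-term-style factorization: repeatedly fits R Tucker approximations, each to the residual left by the other R-1 terms, until the relative error stops improving.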
def pci(T, R, rank, max_iter=1000, min_decrease=1e-5):
    shape = np.array(T).shape
    dim = list(range(len(rank)))
    tensors = [dtensor(np.zeros(shape)) for r in range(R)]
    last = 1
    for i in range(max_iter):
        btd = []
        print "iter {0}".format(i + 1)
        for r in range(R):
            Tres = T - (sum(tensors) - tensors[r])
            print "\t HOOI {0}".format(r + 1)
            Td = tucker.hooi(Tres, rank, init='nvecs')
            btd.append(Td)
            coret = Td[0]
            factm = Td[1]
            Tapprox = coret.ttm(factm, dim)
            print "\t\t norm {0}".format(Tapprox.norm())
            tensors[r] = Tapprox
        Tres = T - sum(tensors)
        error = Tres.norm() / T.norm()
        decrease = last - error
        print "\t --------------------"
        print "\t Error {0}".format(error)
        print "\t Decrease {0}".format(decrease)
        if decrease <= min_decrease:
            break
        last = error
    return btd, tensors
Example #5
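One-shot tensor completion: reconstruct the tensor from a single HOOI fit and keep the original values at the observed entries (marked by self.W).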
    def impute(self):
        time_s = time.time()
        est_data = self.miss_data.copy()
        SD = dtensor(est_data)
        core1, U1 = tucker.hooi(SD, self.ranks, init='nvecs')
        ttm_data = core1.ttm(U1[0], 0).ttm(U1[1], 1).ttm(U1[2], 2)
        self.est_data = self.W * est_data + (self.W == False) * ttm_data
        time_e = time.time()
        self.exec_time = time_e - time_s
Example #6
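Thin wrapper that stores a rank-(r, r, r) HOOI decomposition of a 3-way array at construction time.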
    def __init__(self, data, rank=2):
        """
        Args:
            data: np.ndarray : the underlying multi-dimensional array
            rank: tucker rank
        Returns:
            BaseTensor object
        """
        self.rank = rank
        self.shape = data.shape
        self.core, self.factors = tucker.hooi(dtensor(data), (self.rank, ) * 3,
                                              maxIter=20)
Example #7
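Round-trip test: builds a random Tucker tensor, recovers it with HOOI, and compares the core and the orthomax-rotated factors (orthomax and the free-standing ttm are presumably imported from sktensor elsewhere in the test module).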
def disabled_test_factorization():
    I, J, K, rank = 10, 20, 75, 5
    A = orthomax(np.random.randn(I, rank))
    B = orthomax(np.random.randn(J, rank))
    C = orthomax(np.random.randn(K, rank))

    core_real = dtensor(np.random.randn(rank, rank, rank))
    T = core_real.ttm([A, B, C])
    core, U = tucker.hooi(T, rank)

    assert np.allclose(T, ttm(core, U))
    assert np.allclose(A, orthomax(U[0]))
    assert np.allclose(B, orthomax(U[1]))
    assert np.allclose(C, orthomax(U[2]))
    assert np.allclose(core_real, core)
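Example #8
Denoising by core matching: both the reference and the input volume get a full-rank Tucker decomposition via HOOI, the sorted magnitudes of the input core are pulled toward the reference's curve, and the volume is rebuilt from the adjusted core (core2curve and curve2core are helpers defined elsewhere in the same module).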
def hosvd(vol_in, vol_ref, strength=1.0):
    ##  Check if package is installed
    if tensor_module_avail is False:
        sys.exit('\nERROR: sktensor module not available!\n')

    ##  Tucker decomposition of reference and noisy volumes
    n1, n2, n3 = vol_ref.shape
    R_C, R_U = tuc.hooi(ten.dtensor(vol_ref), [n1, n2, n3], init='nvecs')

    n1, n2, n3 = vol_in.shape
    I_C, I_U = tuc.hooi(ten.dtensor(vol_in), [n1, n2, n3], init='nvecs')
    mmax = np.max(vol_in)

    ##  Get curve of the ordered absolute values of the core
    ##  tensor from both reference and input volumes
    curve_ref, order, signs = core2curve(R_C)
    curve_in, order, signs = core2curve(I_C)

    ##  Adjust curve of the input volume to that of the reference
    curve_den = curve_in + (curve_ref - curve_in) * strength
    core_den = curve2core(curve_den, [n1, n2, n3], order, signs)

    ##  Reconstruction
    vol_in[:] = ten.ttm(ten.dtensor(core_den), I_U)
    vol_in[:] = vol_in / np.max(vol_in) * mmax

    return vol_in
Example #9
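Tensor completion by simultaneous tensor decomposition (STD): HOOI provides the initial factors, then the core and factor matrices are updated with gradient-style steps on the observed entries (masked by W) until the iterate stalls.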
def STD_cpt(sparse_data, W, threshold=1e-4, alpha=2e-10, lm=0.01, p=0.7):
    ds = sparse_data.shape
    X_ori = sparse_data.copy()
    U_list, r_list = T_SVD(X_ori, p)[-2:]
    core, U_list = tucker.hooi(dtensor(X_ori), r_list, init='nvecs')
    [A, B, C] = U_list
    X = core.ttm(A, 0).ttm(B, 1).ttm(C, 2)
    Upre_list = U_list
    F_diff = sys.maxsize
    n_iter = 0  # avoid shadowing the builtin iter
    while F_diff > threshold and n_iter < 500:
        X_pre = X.copy()
        core_pre = core.copy()
        E = W * (X_ori - core_pre.ttm(Upre_list[0], 0).ttm(
            Upre_list[1], 1).ttm(Upre_list[2], 2))
        for i in range(X.ndim):
            mul1 = (W * E).unfold(i)
            if i == 0:
                mul2 = np.kron(Upre_list[2], Upre_list[1])
            elif i == 1:
                mul2 = np.kron(Upre_list[2], Upre_list[0])
            else:
                mul2 = np.kron(Upre_list[1], Upre_list[0])
            mul3 = core_pre.unfold(i).T
            Upre_list[i] = (1 - alpha * lm) * Upre_list[i] + alpha * np.dot(
                np.dot(mul1, mul2), mul3)
        Temp = E.ttm(Upre_list[0].T, 0).ttm(Upre_list[1].T,
                                            1).ttm(Upre_list[2].T, 2)
        core = (1 - alpha * lm) * core_pre + alpha * Temp
        X = core.ttm(Upre_list[0], 0).ttm(Upre_list[1], 1).ttm(Upre_list[2], 2)
        F_diff = np.linalg.norm(X - X_pre)
        n_iter += 1
    return X
Example #10
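Baseline Tucker completion: a single HOOI fit reconstructs the tensor, and observed entries (masked by W) are kept from the input.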
def tucker_cpt(sparse_data, rank_list, W):
    time_s = time.time()
    est_data = sparse_data.copy()
    dshape = np.shape(est_data)
    SD = dtensor(est_data)
    core1, U1 = tucker.hooi(SD, rank_list, init='nvecs')
    # ttm: tensor-times-matrix product along each mode
    ttm_data = core1.ttm(U1[0], 0).ttm(U1[1], 1).ttm(U1[2], 2)
    print(np.linalg.norm(ttm_data - sparse_data))
    est_data = W * est_data + (W == False) * ttm_data
    time_e = time.time()
    print('-' * 8 + 'tucker' + '-' * 8)
    print('exec_time:' + str(time_e - time_s) + 's')
    return est_data
Example #11
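Class-based variant of the STD completion loop from Example #9, with the reconstruction factored into a restruct helper and an explicit stopping threshold.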
    def impute(self):
        time_s = time.time()
        X_ori = self.miss_data.copy()
        core, U_list = tucker.hooi(dtensor(X_ori), self.ranks, init='nvecs')
        X = self.restruct(core, U_list)

        F_diff = sys.maxsize
        n_iter = 0  # avoid shadowing the builtin iter
        while n_iter < self.max_iter:
            F_diff_pre = F_diff
            X_pre = X.copy()
            core_pre = core.copy()
            E = self.W * (X_ori - self.restruct(core_pre, U_list))
            for i in range(X.ndim):
                mul1 = (self.W * E).unfold(i)
                if i == 0:
                    mul2 = np.kron(U_list[2], U_list[1])
                elif i == 1:
                    mul2 = np.kron(U_list[2], U_list[0])
                else:
                    mul2 = np.kron(U_list[1], U_list[0])

                mul3 = core_pre.unfold(i).T
                U_list[i] = (1 - self.alpha * self.lam) * U_list[i] + \
                    self.alpha * np.dot(np.dot(mul1, mul2), mul3)

            core_temp = self.restruct(E, U_list, transpose=True)
            core = (1 - self.alpha * self.lam) * \
                core_pre + self.alpha * core_temp
            X = self.restruct(core, U_list)
            F_diff = np.linalg.norm(X - X_pre)
            if abs(F_diff - F_diff_pre) < self.threshold:
                break

            n_iter += 1
        time_e = time.time()
        self.exec_time = time_e - time_s
        self.est_data = X
        return X
Example #12
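Keras/TensorFlow counterpart of Example #2: Tucker-2 factors for a convolution, converted between PyTorch and TensorFlow kernel orders.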
def get_tucker_factors(layer, rank, cin, cout, kernel_size, **kwargs):
    weights, bias = get_weights_and_bias(layer)
    # print("Weights: ", weights.shape, "\nKernel Size: ", kernel_size)
    core, (U_cout, U_cin,
           U_dd) = tucker.hooi(dtensor(weights),
                               [rank[0], rank[1], weights.shape[-1]],
                               init="nvecs")
    core = core.dot(U_dd.T)
    w_cin = np.array(U_cin)
    w_core = np.array(core)
    w_cout = np.array(U_cout)

    if isinstance(layer, keras.Sequential):
        w_cin_old, w_cout_old = [
            to_pytorch_kernel_order(w) for w in [
                layer.layers[0].get_weights()[0],
                layer.layers[-1].get_weights()[0]
            ]
        ]

        U_cin_old = w_cin_old.reshape(w_cin_old.shape[:2]).T
        U_cout_old = w_cout_old.reshape(w_cout_old.shape[:2])
        w_cin = U_cin_old.dot(U_cin)
        w_cout = U_cout_old.dot(U_cout)

    # Reshape to the proper PyTorch shape order.
    w_cin = w_cin.T.reshape((rank[1], cin, 1, 1))
    w_core = w_core.reshape((rank[0], rank[1], *kernel_size))
    w_cout = w_cout.reshape((cout, rank[0], 1, 1))

    # Reorder to TensorFlow order.
    w_cin, w_core, w_cout = [
        to_tf_kernel_order(w) for w in [w_cin, w_core, w_cout]
    ]

    return [w_cin, w_core, w_cout], [None, None, bias]
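Example #13
Tracking community structure over time: stochastic-block-model graphs are stacked into an adjacency tensor and decomposed with HOOI, and the time-mode factor U3 is plotted (the snippet begins mid-loop; adj, n, N, p, and eps are defined earlier in the source script).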
    q = p*(j-1)/(N+2) + eps  # remove the eps for complete disconnectivity
    P = np.array([[p, q], [q, p]])
    Gsbm = nx.to_numpy_matrix(nx.stochastic_block_model([int(n/2), int(n/2)], P))
    adj[0:n, 0:n, i+j] = Gsbm
    print(i+j)


####  Visualization of the graphs
plt.figure(1)
for i in range(2*N+2):
    plt.subplot(6,7,i+1)
    nx.draw(nx.from_numpy_matrix(adj[0:n,0:n,i]),node_size = 10)

####  Decompose using the Tucker decomposition  ####
T = dtensor(adj)
Y = hooi(T, [n, n, 2*N+2], init='nvecs')

U1 = Y[1][0]
U2 = Y[1][1]
U3 = Y[1][2]

plt.figure(2)
plt.subplot(1,2,1)
plt.imshow(U3.T)
plt.xlabel('Time')
plt.ylabel('Community Structure')
plt.title('Using HOSVD (Tucker Approximation)')

s_vec = np.zeros(2*N+2)
U_data = np.zeros([n,n,2*N+2])
V_data = np.zeros([n,n,2*N+2])
Example #14
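Caffe model compression: rewrites the prototxt so each targeted convolution becomes three layers, then fills the new weights from a Tucker decomposition of the original kernels along the output- and input-channel modes.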
def decompose_model(model_def_path, model_weights_path, layer_ranks):
    """ CREATE DECOMPOSED MODEL DEFINITION """

    with open(model_def_path) as f:
        model_def = caffe.proto.caffe_pb2.NetParameter()
        google.protobuf.text_format.Merge(f.read(), model_def)

    new_model_def = caffe.proto.caffe_pb2.NetParameter()
    new_model_def.name = model_def.name + '_decomposed'

    if model_def.input:
        new_model_def.input.extend(['data'])
        new_model_def.input_dim.extend(model_def.input_dim)

    new_layers = [
    ]  #Keeping track of new layers helps renaming nodes in the future

    for layer in model_def.layer:  # the enumerate index was unused and shadowed below
        if layer.name not in layer_ranks.keys() or layer.type != 'Convolution':
            new_model_def.layer.extend([layer])
        else:
            decomposed_layers = decompose_layer(layer, layer_ranks[layer.name])
            for i in range(3):
                new_layers.append(decomposed_layers[i].name)
            new_model_def.layer.extend(decomposed_layers)

    #Rename bottom/top nodes for some layers !!!
    new_model_def = rename_nodes(new_model_def, new_layers)

    new_model_def_path = model_def_path[:-9] + '_decomposed.prototxt'
    with open(new_model_def_path, 'w') as f:
        google.protobuf.text_format.PrintMessage(new_model_def, f)
    """ CREATE DECOMPOSED MODEL WEIGHTS """

    net = caffe.Net(model_def_path, model_weights_path, caffe.TEST)
    new_net = caffe.Net(new_model_def_path, model_weights_path, caffe.TEST)

    for conv_layer in layer_ranks.keys():
        weights = net.params[conv_layer][0].data
        bias = net.params[conv_layer][1].data
        T = dtensor(weights)
        rank = layer_ranks[conv_layer] + [T.shape[2], T.shape[3]]

        print('Decomposing %s...' % conv_layer)
        core, U = tucker.hooi(T, rank, init='nvecs')

        num_output = net.params[conv_layer][0].data.shape[0]
        channels = net.params[conv_layer][0].data.shape[1]

        core = ttm(core, U[3], mode=3)
        core = ttm(core, U[2], mode=2)
        Us = U[1].reshape(rank[1], channels, 1, 1)
        Ut = U[0].reshape(num_output, rank[0], 1, 1)

        np.copyto(new_net.params[conv_layer + '_S'][0].data, Us)
        np.copyto(new_net.params[conv_layer + '_core'][0].data, core)
        np.copyto(new_net.params[conv_layer + '_T'][0].data, Ut)
        np.copyto(new_net.params[conv_layer + '_T'][1].data, bias)

    new_model_weights_path = model_weights_path[:-11] + '_decomposed.caffemodel'
    new_net.save(new_model_weights_path)

    print('\nDecomposed model definition saved to %s' % new_model_def_path)
    print('\nDecomposed model weights saved to %s' % new_model_weights_path)
    print('\nPlease fine-tune')

    return new_model_def_path, new_model_weights_path
Example #15
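Standalone script: loads a tensor from a .mat file, runs HOOI at a rank chosen for a target tolerance, and reports the relative reconstruction error.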
tensor = loadmat('tensor_test')

#Convert to tensor
T = dtensor(tensor["tensor"])
#T = dtensor(tensor_xyz["tensor_xyz"])
T_arr = np.array(T)

##Tolerance
#eps_0=[9, 2, 1] #0.2
#eps_1=[32, 8, 5] #0.02
#eps_2=[84, 11, 12] #0.002
#eps_3=[181, 12, 31] #0.0002
eps_3=[181, 34, 11] #0.0002

## HOOI
core, U = hooi(T, eps_3, init='nvecs')
#Factor matrices and core tensor
core_S = np.array(core)
print(core_S.shape)

U1 = U[0]
U2 = U[1]
U3 = U[2]

Trec = ttm(core, U)
Trec_S = np.array(Trec)

rel_er = la.norm(T - Trec) / la.norm(T)  # relative error
print(rel_er)

#Compression rate