Example #1
def ten_id(A, r, method = 'svd'):
	d = A.ndim
	rank = tuple(r*np.ones((d,), dtype = 'i'))

	A = dtensor(A)
	Amodes = matricize(A)
	
	if method == 'svd':
		T = hosvd(A, rank, method = 'svd')

	elif method == 'randsvd':
		T = hosvd(A, rank, method = 'randsvd')
	elif method == 'rid':
		#Tensor ID
		T = hoid(A, rank, method = 'rid') 
	elif method == 'pqr':
		#Tensor ID
		T = hoid(A, rank, method = 'qr') 
	elif method == 'id':
		#Tensor ID
		T = hoid(A, rank, method = 'rrqr') 

	elif method == 'stid':
		T = sthoid(A, rank, method = 'rrqr') 


	else:
		raise NotImplementedError

	#Tmodes = T.matricize()
	#err = [np.linalg.norm(tm-am)/np.linalg.norm(am) for am,tm in zip(Amodes,Tmodes)]

	am = A.unfold(0)
	tm = T.G.ttm(T.U).unfold(0)	
	err = norm(tm-am)/norm(am)

	return (err, T.I) if method == 'id' else err
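A minimal usage sketch for ten_id (assuming hosvd, hoid, sthoid, matricize, dtensor and norm are importable from the same module as above; sizes and ranks are illustrative):

import numpy as np

A = np.random.rand(20, 20, 20)
err = ten_id(A, r=5, method='svd')         # relative error of the HOSVD approximation
err_id, ind = ten_id(A, r=5, method='id')  # the tensor ID variant also returns index lists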
Example #2
def sparse_model():
    lenet_path = '/home/fuxianya/sdd/alexnet/alexnet'
    net = caffe.Classifier(lenet_path + '_deploy.prototxt',
                           lenet_path + '.caffemodel')
    fast_net = caffe.Classifier(lenet_path + '_deploy.prototxt',
                                lenet_path + '.caffemodel')
    model = load_model(lenet_path + '.prototxt')
    layer_num = len(net.layers)
    print(len(net.layers))
    for ind in range(layer_num + 1):
        if model.layer[ind].type in ['Convolution']:
            i = ind - 1
            print(model.layer[ind].name)
            print("sparse conv")
            weights = net.layers[i].blobs[0].data
            T = dtensor(weights)
            print(weights.shape)
            W = sparse_weights(T, weights.shape[0], weights.shape[1],
                               weights.shape[2], weights.shape[3])
            np.copyto(fast_net.layers[i].blobs[0].data, W)
        if model.layer[ind].type in ['InnerProduct0']:
            print(ind)
            i = ind - 1
            print("sparse inner")
            print(model.layer[ind].name)
            weights = net.layers[i].blobs[0].data
            T = dtensor(weights)
            print(weights.shape)
            W = sparse_weights_2(T, weights.shape[0], weights.shape[1])
            np.copyto(fast_net.layers[i].blobs[0].data, W)

    fast_net.save(lenet_path + '_sparse.caffemodel')
Example #3
    def test_cp_decomposition(self):
        test_data1 = T.array([[[0, 1, 3, 4], [4, 0, 2, 1], [4, 2, 3, 4]],
                              [[2, 4, 2, 3], [3, 3, 2, 4], [2, 3, 0, 2]]])
        test_data2 = T.array([[[3, 1, 1, 2], [1, 0, 3, 2], [3, 4, 0, 2]],
                              [[1, 2, 3, 3], [2, 3, 1, 0], [1, 2, 0, 2]]])
        factors, lamda = cp(test_data1,
                            r=3,
                            stop_iter=500,
                            tol=1e-6,
                            normalize_factor=True,
                            random_seed=44)
        P, fit, itr = cp_als(dtensor(test_data1), 3, init='random')
        T.testing.assert_array_almost_equal(reconstruct_tensor(
            factors, lamda, (2, 3, 4)),
                                            P.toarray(),
                                            decimal=0)

        factors, lamda = cp(test_data2,
                            r=3,
                            stop_iter=500,
                            tol=1e-6,
                            normalize_factor=True,
                            random_seed=44)
        P, fit, itr = cp_als(dtensor(test_data2), 3, init='random')
        T.testing.assert_array_almost_equal(reconstruct_tensor(
            factors, lamda, (2, 3, 4)),
                                            P.toarray(),
                                            decimal=0)
Example #4
def tensor_decomposition(src, daytype, method="", component_num=0, sample_rate=1.0):
	import numpy as np
	import sys
	sys.path.append('./scikit-tensor')
	from sktensor import dtensor, tucker_hooi
	from tensor_decomposition_gradient import Gradient_Tensor_Decomposition
	# https://gist.github.com/panisson/7719245
	from _ncp import *
	from _beta_ntf import *

	_component_num = component_num
	tensor_data = json.loads(open("../../data/tensor_data1/tensor_{0}_{1}.txt".format(src,daytype)).read())

	if src == "metro":
		tensor_data = [[[100.0*tensor_data[x][y][h]/sum(tensor_data[x][y]) if sum(tensor_data[x][y])!=0 else 0 \
						for h in range(24)] for y in range(len(tensor_data[0]))] for x in range(len(tensor_data))]
	if src == "taxi":
		if daytype == "workdays":
			tensor_data = [[[tensor_data[x][y][h] if sum(tensor_data[x][y])>=90 else 0 \
							for h in range(24)] for y in range(len(tensor_data[0]))] for x in range(len(tensor_data))]
		if daytype == "holidays":
			tensor_data = [[[tensor_data[x][y][h] if sum(tensor_data[x][y])>=60 else 0 \
							for h in range(24)] for y in range(len(tensor_data[0]))] for x in range(len(tensor_data))]

	# --- sample ---
	A = np.array(tensor_data)
	l1, l2, l3 = A.shape
	A = A.reshape((l1*l2*l3,))
	m = [i for i, a in enumerate(A) if a != 0]
	C = np.random.choice(len(m), int(len(m)*(1.-sample_rate)), replace=False)
	for i in C:
		A[m[i]] = 0
	A = A.reshape((l1,l2,l3))

	# --- decomposition ---
	T = dtensor(tensor_data)
	# core, (matrix_time, matrix_location_start, matrix_location_finish) = tucker_hooi(T, _component_num, init='random')
	# core, (matrix_time, matrix_location_start, matrix_location_finish) = Gradient_Tensor_Decomposition(T, _component_num, 30, 0.0001, 0.1)
	if method == "ANLS_BPP":
		X_approx = nonnegative_tensor_factorization(dtensor(A), _component_num, method='anls_bpp', min_iter=50, max_iter=500)
		X_approx = X_approx.totensor()
	elif method == "ANLS_AS":
		X_approx = nonnegative_tensor_factorization(dtensor(A), _component_num, method='anls_asgroup', min_iter=50, max_iter=500)
		X_approx = X_approx.totensor()
	elif method == "Beta_NTF":
		beta_ntf = BetaNTF(A.shape, n_components=_component_num, beta=2, n_iter=100, verbose=True)
		beta_ntf.fit(A)
		matrix_location_start, matrix_location_finish, matrix_time = beta_ntf.factors_
		E = np.zeros((_component_num, _component_num, _component_num))
		for k in xrange(_component_num):
			E[k][k][k] = 1
		C = dtensor(E)
		X_approx = C.ttm(matrix_location_start, 0).ttm(matrix_location_finish, 1).ttm(matrix_time, 2)

	# --- compute error ---
	# X_err = abs(T - X_approx).sum()/T.sum()
	X_err = (((T - X_approx)**2).sum()/(T**2).sum())**0.5
	print "Error:", X_err
Example #5
    def get_cp_factors(self):

        if self.pretrained is not None:
            mat_dict = scipy.io.loadmat(self.pretrained)

            if mat_dict['R'][0][0] != self.rank:
                print('WRONG FACTORS, do not correspond to desired rank')

            PU_z, PU_cout, PU_cin = [Ui[0] for Ui in mat_dict['P_bals_epc_U']]
            Plmbda = mat_dict['P_bals_epc_lambda'].ravel()

            f_cin = np.array(PU_cin)
            f_cout = np.array(PU_cout)
            f_z = (np.array(PU_z) * (Plmbda))

        else:
            if '__getitem__' in dir(self.layer):
                f_cout_old, f_cin_old, f_z_old = self.weight

                f_cout_old = np.array(f_cout_old)
                f_cin_old = np.array(f_cin_old)
                f_z_old = np.array(f_z_old)

                bias = self.bias

                f_cout, f_cin, f_z = recompress_ncpd_tensor(
                    [f_cout_old, f_cin_old, f_z_old],
                    new_rank=self.rank,
                    max_cycle=500,
                    return_fit=False,
                    tensor_format='cpd')
            else:
                if self.weight.is_cuda:
                    self.weight = self.weight.cpu()
                    if self.bias is not None:
                        self.bias = self.bias.cpu()
                bias = self.bias

                T = dtensor(self.weight)
                P, fit, itr, exectimes = cp_als(T, self.rank, init='random')

                f_cout = np.array(P.U[0])
                f_cin = np.array(P.U[1])
                f_z = (np.array(P.U[2]) * (P.lmbda))

        f_cin = torch.FloatTensor(
            np.reshape(f_cin.T, [self.rank, self.cin, 1, 1])).contiguous()
        f_z = torch.FloatTensor(
            np.reshape(f_z.T, [self.rank, 1, *self.kernel_size])).contiguous()
        f_cout = torch.FloatTensor(
            np.reshape(f_cout, [self.cout, self.rank, 1, 1])).contiguous()

        return [f_cin, f_z, f_cout], [None, None, bias]
Example #6
def decompose_tensor(filters):
    """ filters is of type input feature maps, output feature maps, wxh of filter
        Output is a structure P which contains lambda, U{1}, U{2}, U{3}    
    """
    # Set logging to DEBUG to see CP-ALS information
    logging.basicConfig(level=logging.DEBUG)
    print filters.shape
    filters = np.array(filters)   
    print filters.shape 
    print filters.dtype
    nbr_filters = filters.shape[0]
    fwidth = filters.shape[2]
    fheight = filters.shape[3]
    Pstruct = []
    for channel in range(filters.shape[1]):
        filter_for_channel = filters[:, channel, :, :]
        filter_for_channel = filter_for_channel.reshape(nbr_filters, fwidth, fheight)
        filter_for_channel = np.swapaxes(filter_for_channel, 0, 2)
        print 'Number of filters ', nbr_filters
        print 'filter_for_channel shape ', filter_for_channel.shape
        fig, axes = plt.subplots(nrows=5, ncols=4)
        fig.tight_layout()

        for f in xrange(nbr_filters):
            img = filter_for_channel[:, :, f]
            plt.subplot(5, 4, f + 1)  # subplot indices are 1-based
            plt.imshow(img)
        plt.show(block=False)
        T = dtensor(filter_for_channel)
        rank = int(np.floor(nbr_filters * 0.6))
        print 'rank is ', rank
        session = pymatlab.session_factory()
        session.putvalue('A', rank)
        del session
        # P was never defined in the original snippet; compute it with CP-ALS
        P, fit, itr, _ = cp_als(T, rank, init='random')
        ## P.U, P.lmbda
        print 'P U0,U1,U2, lambda sizes: ', P.U[0].size, P.U[1].size, P.U[2].size, P.lmbda
        print 'fit was ', fit        
        Pstruct.append(P)
        #dtensor(ktensor(U).toarray())
        print np.allclose(T, P.totensor())
    
    
    U = [np.random.rand(i,3) for i in (20, 10, 14)]
    
    Tn = dtensor(ktensor(U).toarray())
    P, fit, itr, _ = cp_als(Tn, 10)
    print 'P U0,U1,U2, lambda sizes: ', P.U[0].size, P.U[1].size, P.U[2].size, P.lmbda
    print 'fit was ', fit  
    print np.allclose(Tn, P.totensor())
    
    return Pstruct
Example #7
def NTD_ALS_decoupledversion(X,Coretensorsize,max_iter,N,m,epsilon):
    G_old,list_of_factors=Tucker2HOSVD(X,Coretensorsize,N,m,True)
    A2_old=np.maximum(list_of_factors[0],0)
    A3_old=np.maximum(list_of_factors[1],0)
    A2_new=np.zeros(A2_old.shape)
    A3_new=np.zeros(A3_old.shape)
    G_result=G_old
    I=np.identity(Coretensorsize[0]) 
    U=[I,A2_old,A3_old] 
    nb_iter=0   
    error_list=[]
    approxim=Product_with_factors(G_old,U)
    error=np.linalg.norm(X-approxim)
    previous_error=0
    error_list.append(error)
    while(nb_iter<max_iter):     
       previous_error=copy.copy(error)
       nb_iter=nb_iter+1       
       G_new=decomposition_for_core_retrieving_parallelized(X,U[1:3])
       approxim=Product_with_factors(dtensor(G_new),U)
       error=(X-dtensor(approxim)).norm() 
       error_list.append(error)
       for n in range(3):         
          if(n==1):
            temp=dtensor(G_new)
            temp=temp.unfold(n)      
            B2=np.dot(np.kron(A3_old,I),np.transpose(temp))    
            A2_new=np.transpose(NMF_decoupling(B2,np.transpose(X.unfold(n)),False)) #We update A2
            #A2_new=np.transpose(NMF_decoupling_parallelized(B2,np.transpose(X.unfold(n)))) #We update A2
            error=Error_estimation_factor(B2,X,n,A2_new)
            error_list.append(error) 
          if(n==2):
            temp=dtensor(G_new)
            temp=temp.unfold(n)
            B3=np.dot(np.kron(A2_new,I),np.transpose(temp)) 
            #A3_new=np.transpose(NMF_decoupling_parallelized(B3,np.transpose(X.unfold(n))))#We update A3
            A3_new=np.transpose(NMF_decoupling(B3,np.transpose(X.unfold(n)),False))#We update A3
            error=Error_estimation_factor(B3,X,n,A3_new)
            error_list.append(error)  
       U=[I,A2_new,A3_new]
       G_result=G_new
       if(previous_error-error<epsilon):        
         U=[I,A2_old,A3_old]
         G_result=G_old
       
         break
       A2_old=A2_new
       A3_old=A3_new
       G_old=G_new
      
    return U,G_result,error_list,nb_iter
Example #8
def get_cp_factors(layer, rank, cin, cout, kernel_size, **kwargs):
    weights, bias = get_weights_and_bias(layer)
    w_cin = None
    w_z = None
    w_cout = None

    if isinstance(layer, keras.Sequential):
        w_cout, w_cin, w_z = recompress_ncpd_tensor(weights,
                                                    new_rank=rank,
                                                    max_cycle=500,
                                                    return_fit=False,
                                                    tensor_format="cpd")
    elif isinstance(layer, keras.layers.Conv2D):
        P, _, _ = cp_als(dtensor(weights), rank, init="random")
        w_cin, w_cout, w_z = extract_weights_tensors(P)

    if w_cin is None or w_z is None or w_cout is None:
        raise CompressionError()

    # Reshape to the proper PyTorch shape order.
    w_cin = w_cin.T.reshape((rank, cin, 1, 1))
    w_z = w_z.T.reshape((rank, 1, *kernel_size))
    w_cout = w_cout.reshape((cout, rank, 1, 1))

    # Reorder to TensorFlow order.
    w_cin, w_z, w_cout = [to_tf_kernel_order(w) for w in [w_cin, w_z, w_cout]]

    return [w_cin, w_z, w_cout], [None, None, bias]
Example #9
    def factorize(self,
                  x,
                  iterations=100,
                  showProgress=False,
                  default=True,
                  reference_matrix=[],
                  L_matrix=[],
                  lambda_0=0.0,
                  lambda_1=0.0):
        if not default:
            x = dtensor(x)

            num_ways = len(self.factor[0])
            X_itr = []
            R = len(self.factor)
            for way_index in range(num_ways):
                X_cur = []
                for r in range(R):
                    X_cur.append(self.factor[r][way_index].tolist())
                X_itr.append(np.array(X_cur).T)
            import copy
            X_itr = copy.deepcopy(reference_matrix)
            error, similarity_error, reference_error = self.computeError(
                x, X_itr, reference_matrix, L_matrix)
            print(
                "start: {}/{}. reconstruct: {}; item: {}; pattern: {} ".format(
                    0, iterations, error, similarity_error, reference_error))

        for i1 in np.arange(1, iterations + 1):
            if default:
                self.updateAllFactors(x, self.factor)
            else:
                # pdb.set_trace()
                # lambda_0 = 0.01
                X_itr = self.updateAllFactorsGradient(
                    x,
                    X_itr,
                    num_ways,
                    R,
                    reference_matrix=reference_matrix,
                    L_matrix=L_matrix,
                    lambda_0=lambda_0,
                    lambda_1=lambda_1)
                error, similarity_error, reference_error = self.computeError(
                    x, X_itr, reference_matrix, L_matrix)
                # error_X = math.sqrt(getError(x,ktensor_X,x.norm()))/x.norm()
            # the error metrics exist only in the non-default (gradient) branch
            if (not default) and showProgress and (i1 % 1) == 0:
                # progress = "*" if 0 < (i1 % 20) \
                # 	else "[%d/%d]\n" % (i1, iterations)
                print("{}/{}. reconstruct: {}; item: {}; pattern: {} ".format(
                    i1, iterations, error, similarity_error, reference_error))
                # print(error_X)
        if not default:
            result_factor = []
            for r in range(R):
                each_factor = []
                for way_index in range(num_ways):
                    each_factor.append(X_itr[way_index].T[r])
                result_factor.append(each_factor)
            self.factor = result_factor
Example #10
def pci(T, R, rank, max_iter=1000, min_decrease=1e-5):
    shape = np.array(T).shape
    dim = range(len(rank))
    tensors = [dtensor(np.zeros(shape)) for r in range(R)]
    last = 1
    for i in range(max_iter):
        btd = []
        print "iter {0}".format(i + 1)
        for r in range(R):
            Tres = T - (sum(tensors) - tensors[r])
            print "\t HOOI {0}".format(r + 1)
            Td = tucker.hooi(Tres, rank, init='nvecs')
            btd.append(Td)
            coret = Td[0]
            factm = Td[1]
            Tapprox = coret.ttm(factm, dim)
            print "\t\t norm {0}".format(Tapprox.norm())
            tensors[r] = Tapprox
        Tres = T - sum(tensors)
        error = Tres.norm() / T.norm()
        decrease = last - error
        print "\t --------------------"
        print "\t Error {0}".format(error)
        print "\t Decrease {0}".format(decrease)
        if decrease <= min_decrease:
            break
        last = error
    return btd, tensors
Example #11
 def test_cp_reconstruction(self):
     data = T.array([[[3, 1, 1, 2], [1, 0, 3, 2], [3, 4, 0, 2]],
                     [[1, 2, 3, 3], [2, 3, 1, 0], [1, 2, 0, 2]]])
     tensor = dtensor(data)
     P, fit, itr = cp_als(tensor, 3, init='random')
     T.testing.assert_array_almost_equal(
         P.toarray(), reconstruct_tensor(P.U, P.lmbda, (2, 3, 4)))
Example #12
def corr_rela(handle_train, ori_speeddata):
    data, miss_data1, W1, ori_W1 = handle_train
    #data = dtensor(data).unfold(2)
    for i in range(data.ndim):
        data = dtensor(miss_data1).unfold(i)
        ds = data.shape
        S = 0
        for j in range(ds[0]):
            for k in range(j + 1, ds[0]):
                C = pearson_dis(data[j, :], data[k, :])
                S += C
        S /= (ds[0] * (ds[0] - 1) / 2)
        print(str(i) + '_CC', S)
    return
    Y1 = []
    Y2 = []
    r = 60
    for i in range(ds[2]):
        Y1.append(np.var(data[r, :, i]))
        Y2.append(np.std(data[r, :, i]) / np.mean(data[r, :, i]))
    plt.figure()
    #plt.plot(list(range(ds[1])),Y1,'r')
    plt.plot(list(range(ds[2])), Y2, 'b')
    plt.savefig(img_dir + str(r) + '_corr_rela.png')
    plt.close()
    return
Example #13
def tensor_decomp(X):
    print("CP-ALS Decomposition.")
    T = dtensor(X)
    P, fit, itr, exectimes = cp_als(T, 2, init='nvecs')
    proj = P.U[2]
    fproj = np.abs(np.fft.fft(proj, axis=0))[:X.shape[-1] // 2, :]
    return fproj, proj
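A hedged usage sketch for tensor_decomp (assuming numpy and sktensor's dtensor/cp_als are imported as in the snippets above; the input shape is illustrative):

import numpy as np

X = np.random.rand(30, 8, 64)     # e.g. trials x channels x time samples
fproj, proj = tensor_decomp(X)    # proj: temporal factors, fproj: their spectra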
Example #14
def Transform_featuresmatrix_into_tensor(matrix, firstsize, secondsize):
    size = np.array(matrix.shape, dtype=int)
    nbsamples = size[0]
    tensor = dtensor(np.zeros((nbsamples, firstsize, secondsize)))
    for k in range(nbsamples):
        tensor[k, :, :] = np.reshape(matrix[k, :], (firstsize, secondsize))
    return tensor
Example #15
def approximate_params(netdef, params, approx_netdef, approx_params,
			 btd_config, max_iter, min_decrease):
	net = caffe.Net(netdef, params, caffe.TEST)
	net_approx = caffe.Net(approx_netdef, params, caffe.TEST)
	convs = [(k, v[0].data, v[1].data) for k, v in net.params.items() if k in btd_config.keys()]
	for conv, kernel, bias in convs:
		size = kernel.shape
		T, S, H, W = size[0:4]
		P = H * W
		s, t, blocks = btd_config[conv]
		# (T, S, H, W) -> (T, S, P)
		kernel = kernel.reshape(T, S, P)
		# compute BTD
		t_ = int(t/blocks)
		s_ = int(s/blocks)
		rank = [t_, s_, P]
		print('calculating BTD for {0}...'.format(conv))
		btd, _ = pci(dtensor(kernel), blocks, rank, max_iter, min_decrease) 
		# BTD -> (c, C) (n, c/blocks, P) (N, n)
		kernel_a = np.concatenate([subtensor[1][1] for subtensor in btd], axis=1)
		kernel_b = np.concatenate([subtensor[0].ttm(subtensor[1][2], 2) for subtensor in btd], axis=0)
		kernel_c = np.concatenate([subtensor[1][0] for subtensor in btd], axis=1)
		# (c, C) -> (c, C, 1, 1)
		kernel_a = kernel_a.T.reshape(s, S, 1, 1)
		# (n, c/blocks, P) -> (n, c/blocks, H, W)
		kernel_b = kernel_b.reshape(t, s_, H, W)
		# (N, n) -> (N, n, 1, 1)
		kernel_c = kernel_c.reshape(T, t, 1, 1)
		# set kernel to low-rank model
		net_approx.params[conv + 'a'][0].data[...] = kernel_a
		net_approx.params[conv + 'b'][0].data[...] = kernel_b
		net_approx.params[conv + 'c'][0].data[...] = kernel_c
		# copy bias to low-rank model
		net_approx.params[conv + 'c'][1].data[...] = bias
	net_approx.save(approx_params)
Example #16
    def get_tucker_factors(self):
        if self.pretrained is not None:
            raise AttributeError('Not implemented')
        else:
            weights = dtensor(self.weight.cpu())
            if self.bias is not None:
                bias = self.bias.cpu()
            else:
                bias = self.bias
            
            core, (U_cout, U_cin, U_dd) = tucker.hooi(weights,
                                                      [self.ranks[0],
                                                       self.ranks[1],
                                                       weights.shape[-1]], init='nvecs')
            core = core.dot(U_dd.T)

            w_cin = np.array(U_cin)
            w_core = np.array(core)
            w_cout = np.array(U_cout)
            
            if isinstance(self.layer, nn.Sequential):
                w_cin_old = self.layer[0].weight.cpu().data
                w_cout_old = self.layer[2].weight.cpu().data

                U_cin_old = np.array(torch.transpose(w_cin_old.reshape(w_cin_old.shape[:2]), 1, 0))
                U_cout_old = np.array(w_cout_old.reshape(w_cout_old.shape[:2]))
                
                w_cin = U_cin_old.dot(U_cin)
                w_cout = U_cout_old.dot(U_cout)

        w_cin = torch.FloatTensor(np.reshape(w_cin.T, [self.ranks[1], self.cin, 1, 1])).contiguous()
        w_core = torch.FloatTensor(np.reshape(w_core, [self.ranks[0], self.ranks[1], *self.kernel_size])).contiguous()
        w_cout = torch.FloatTensor(np.reshape(w_cout, [self.cout, self.ranks[0], 1, 1])).contiguous()

        return [w_cin, w_core,  w_cout], [None, None,  bias]
Example #17
    def factorize(self, x, iterations=100, showProgress=False, default=True):
        if not default:
            x = dtensor(x)
            num_ways = len(self.factor[0])
            X_itr = []
            R = len(self.factor)
            for way_index in range(num_ways):
                X_cur = []
                for r in range(R):
                    X_cur.append(self.factor[r][way_index].tolist())
                X_itr.append(np.array(X_cur).T)

        for i1 in np.arange(1, iterations + 1):
            if showProgress:
                progress = "*" if 0 < (i1 % 20) \
                 else "[%d/%d]\n" % (i1, iterations)
                print(progress)
            if default:
                self.updateAllFactors(x, self.factor)
            else:
                # pdb.set_trace()
                X_itr = self.updateAllFactorsGradient(x, X_itr, num_ways, R)
                ktensor_X = ktensor(X_itr)
                import math
                error_X = math.sqrt(getError(x, ktensor_X,
                                             x.norm())) / x.norm()
                # print(error_X)
        if not default:
            result_factor = []
            for r in range(R):
                each_factor = []
                for way_index in range(num_ways):
                    each_factor.append(X_itr[way_index].T[r])
                result_factor.append(each_factor)
            self.factor = result_factor
Example #18
def Multiplicative_initialization_NTD2(X, Coretensorsize):

    [K, If, It] = np.array(np.shape(X), dtype=int)
    epsilon = 0.001
    A1 = np.eye(K, Coretensorsize[0])
    model = NMF(n_components=Coretensorsize[1], init='random', random_state=0)
    W = model.fit_transform(X.unfold(1))
    A2 = W
    model = NMF(n_components=Coretensorsize[2], init='random', random_state=0)
    W = model.fit_transform(X.unfold(2))
    A3 = W
    G = dtensor(
        np.random.rand(Coretensorsize[0], Coretensorsize[1],
                       Coretensorsize[2]))
    for i in range(3):
        S = np.dot(G.unfold(1), np.transpose(np.kron(A3, A1)))
        numerator = np.dot(X.unfold(1), np.transpose(S))
        denumerator = np.dot(A2, np.dot(S, np.transpose(S)))
        denumerator = Replace_zero_elements(denumerator, epsilon)
        A2 = A2 * (numerator / denumerator)
        S = np.dot(G.unfold(2), np.transpose(np.kron(A2, A1)))
        numerator = np.dot(X.unfold(2), np.transpose(S))
        denumerator = np.dot(A3, np.dot(S, np.transpose(S)))
        denumerator = Replace_zero_elements(denumerator, epsilon)
        A3 = A3 * (numerator / denumerator)

        list_of_factors_matrices = [A1, A2, A3]
        G = Core_multiplicative_initialization(X, G, list_of_factors_matrices)
    return G, list_of_factors_matrices[1:3]
Example #19
def get_cp_factors(layer, rank, cin, cout, kernel_size, **kwargs):
    weights, bias = get_weights_and_bias(layer)
    w_h = None
    w_w = None
    w_cin = None
    w_cout = None

    if isinstance(layer, keras.Sequential):
        w_cout, w_cin, w_h, w_w = recompress_ncpd_tensor(weights, new_rank=rank, max_cycle=500, return_fit=False,
                                                         tensor_format="cpd")
    elif isinstance(layer, keras.layers.Conv2D):
        P, _, _ = cp_als(dtensor(weights), rank, init="random")
        w_cin, w_cout, w_h, w_w = extract_weights_tensors(P)

    # Reshape to proper kernel sizes.
    w_h = w_h.T.reshape((rank, 1, kernel_size[0], 1))
    w_w = w_w.T.reshape((rank, 1, 1, kernel_size[1]))
    w_cin = w_cin.T.reshape((rank, cin, 1, 1))
    w_cout = w_cout.reshape([cout, rank, 1, 1])

    # Reorder to TensorFlow order.
    w_cin, w_cout = [to_tf_kernel_order(w) for w in [w_cin, w_cout]]

    # The middle layers are depthwise convolutions, so their kernels should have
    # the order [rank, 1, kernel_size, kernel_size]. Reordering from TensorFlow
    # order back to PyTorch order would instead be
    # w_h, w_w = [depthwise_to_pytorch_kernel_order(w) for w in [w_h, w_w]].
    w_h, w_w = [to_tf_kernel_order(w) for w in [w_h, w_w]]

    return [w_cin, w_h, w_w, w_cout], [None, None, None, bias]
Example #20
 def cp_withoutPreDefRank(self,T):        
     rank=1
     for r in range(1,50):        
         logging.warning( "Testing the rank "+r.__str__())
         P= self.claculateCP(T,r) 
         logging.warning( "T norm "+dtensor(T.toarray()).norm().__str__())
         logging.warning( "P norm "+P.norm().__str__())
         gc.collect()
         #if np.allclose(dtensor(T.toarray()), dtensor(P.toarray()),atol=1e-3,rtol=1e-5):
         if (P.norm()*100/dtensor(T.toarray()).norm())>95:
             logging.warning( "Rank founded : "+r.__str__())
             rank = r
             break
         logging.warning( "not a good rank")
         P=None
     return P,rank
Example #21
def candecomp(T, K, outer_iterations="k", inner_iterations="k", **kwargs):
    """
    Return the canonical rank K decomposition of a tensor T.
    @assumptions - T is symmetric orthogonal
    @params L - number of projections
    """
    assert prerequisites(T, K)
    if outer_iterations == "k":
        outer_iterations = 2 * K
    else:
        assert int(outer_iterations)
    if inner_iterations == "k":
        inner_iterations = 10 * int(np.ceil(np.log(K)))
    else:
        assert int(inner_iterations)

    T = dtensor(T)

    # (1) Outer loop - find distinct eigen vectors
    pi, W = [], []
    for _ in xrange(K):
        l, v = find_max_eigenpair(T, outer_iterations, inner_iterations)
        pi.append(l)
        W.append(v)
        T = deflate(T, l, v)
    pi, W = np.array(pi), np.array(W).T

    return pi, W, W, W
Example #22
def valid_3ord():
    print('\nCorrectness benchmark for 3 order tensor HOSVD.\n')
    
    shape = (2, 3, 4)
    max_iter = 30
    print('----------TensorNP----------')
    norm_errors = 0
    for _ in range(max_iter):
        tensor = tnp.randn(2, 3, 4)
        g, factors = tnp.hosvd(tensor, compute_core=True)
        rec_tensor = tnp.reconstruct_hosvd(g, factors)
        norm_error = tnp.linalg.norm(rec_tensor - tensor) / tnp.linalg.norm(tensor)
        norm_errors += norm_error
    print(f'error ({norm_errors/max_iter})')

    print('----------scikit-tensor----------')
    norm_errors = 0
    for _ in range(max_iter):
        tensor = tnp.randn(2, 3, 4)
        skt_tensor = skt.dtensor(tensor)
        factors, g = skt.tucker.hosvd(skt_tensor, rank=[2, 3, 4])
        rec_tensor = tnp.reconstruct_hosvd(g, factors)
        norm_error = tnp.linalg.norm(rec_tensor - tensor) / tnp.linalg.norm(tensor)
        norm_errors += norm_error
    print(f'error ({norm_errors/max_iter})')
Example #23
def candecomp( T, K, outer_iterations = "k", inner_iterations = "k", **kwargs ):
    """
    Return the canonical rank K decomposition of a tensor T.
    @assumptions - T is symmetric orthogonal
    @params L - number of projections
    """
    assert prerequisites(T, K)
    if outer_iterations == "k":
        outer_iterations = 2*K
    else:
        assert int(outer_iterations)
    if inner_iterations == "k":
        inner_iterations = 10*int(np.ceil(np.log(K)))
    else:
        assert int(inner_iterations)

    T = dtensor(T)

    # (1) Outer loop - find distinct eigen vectors
    pi, W = [], []
    for _ in xrange(K):
        l, v = find_max_eigenpair(T, outer_iterations, inner_iterations)
        pi.append(l)
        W.append(v)
        T = deflate(T, l, v)
    pi, W = np.array(pi), np.array(W).T
    
    return pi, W, W, W
Example #24
    def test_mttkrp(self):
        t = T.array([[[0, 1, 3, 4], [4, 0, 2, 1], [4, 2, 3, 4]],
                     [[2, 4, 2, 3], [3, 3, 2, 4], [2, 3, 0, 2]]])
        dtensor = sktensor.dtensor(t)

        factors = [None] * 3
        for i in range(3):
            factors[i] = T.random.rand(t.shape[i], 3)
        # print(factors)
        # print(dtensor.uttkrp(factors, 0))
        # print(mttkrp(t, factors, 0))
        # T.testing.assert_array_almost_equal(dtensor.uttkrp(factors, 0),
        #     mttkrp(t, factors, 0))

        # The two implementations interpret the unfolding details differently, so
        # the results differ, but both algorithms are correct
        # T.testing.assert_array_almost_equal(dtensor.unfold(0), unfold(t, 0))
        from sktensor.core import khatrirao
        from .. import seq_kr

        order = list(range(0)) + list(range(0 + 1, 3))
        T.testing.assert_array_almost_equal(
            seq_kr(factors, exclude=0, reverse=True),
            khatrirao(tuple(factors[i] for i in order), reverse=True))

        order = list(range(1)) + list(range(1 + 1, 3))
        T.testing.assert_array_almost_equal(
            seq_kr(factors, exclude=1, reverse=True),
            khatrirao(tuple(factors[i] for i in order), reverse=True))
Example #25
    def get_cp_factors(self):

        if '__getitem__' in dir(self.layer):
            f_cout_old, f_cin_old, f_h_old, f_w_old = self.weight

            f_cout_old = np.array(f_cout_old)
            f_cin_old = np.array(f_cin_old)
            f_h_old = np.array(f_h_old)
            f_w_old = np.array(f_w_old)

            bias = self.bias

            f_cout, f_cin, f_h, f_w = recompress_ncpd_tensor(
                [f_cout_old, f_cin_old, f_h_old, f_w_old],
                new_rank=self.rank,
                max_cycle=500,
                return_fit=False,
                tensor_format='cpd')

        else:
            if self.weight.is_cuda:
                self.weight = self.weight.cpu()
                if self.bias is not None:
                    self.bias = self.bias.cpu()
            bias = self.bias

            T = dtensor(self.weight)
            P, fit, itr, exectimes = cp_als(T, self.rank, init='random')

            f_w = (np.array(P.U[3]) * (P.lmbda))
            f_h = np.array(P.U[2])
            f_cin = np.array(P.U[1])
            f_cout = np.array(P.U[0])

        f_h = torch.FloatTensor(
            np.reshape(f_h.T,
                       (self.rank, 1, self.kernel_size[0], 1))).contiguous()
        f_w = torch.FloatTensor(
            np.reshape(f_w.T,
                       [self.rank, 1, 1, self.kernel_size[1]])).contiguous()
        f_cin = torch.FloatTensor(
            np.reshape(f_cin.T, [self.rank, self.cin, 1, 1])).contiguous()
        f_cout = torch.FloatTensor(
            np.reshape(f_cout, [self.cout, self.rank, 1, 1])).contiguous()

        return [f_cin, f_h, f_w, f_cout], [None, None, None, bias]
Example #26
 def generateTensor(self):
     entities = ["Alex","Bob","chris","Don","Elly"]
     predicates = ["Loves","Hates"]
     T = np.zeros((5, 5, 2))
     T[:, :, 0] = [[0,0,0,1,1], [0,0,0,0,0], [0,0,0,0,0], [1,0,0,0,0], [1,0,0,0,0]]
     T[:, :, 1] = [[0,1,0,0,0], [0,0,1,0,0], [0,1,0,0,0], [0,0,1,0,0], [0,1,1,0,0]]
     T = dtensor(T)
     return T,None,entities,predicates
Example #27
def vis_ind(A, fname = 'hilbert'):
	
	#Visualize indices
	_, ind = ten_id(A, r = 10, method = 'id')
	plotindlst(dtensor(A), ind)
	plt.savefig('figs/indlst' + fname + '.png')

	return
Example #28
def vis_ind(A, fname='hilbert'):

    #Visualize indices
    _, ind = ten_id(A, r=10, method='id')
    plotindlst(dtensor(A), ind)
    plt.savefig('figs/indlst' + fname + '.png')

    return
Example #29
def Product_with_factors(coretensor, factor_list):
    approxim = np.copy(coretensor)
    approxim = dtensor(approxim)
    mode = -1
    for factor_matrix in factor_list:
        mode = mode + 1
        approxim = approxim._ttm_compute(factor_matrix, mode, False)
    return approxim
Example #30
 def as_dtensor(self):
     sz = self.original_tensor_size
     order = np.concatenate((self.row_indices, self.column_indices))
     order = order.tolist()
     data = self.data.reshape(get_elements_at(sz, order))
     # transpose + argsort(order) equals ipermute
     data = data.transpose(np.argsort(order))
     return dtensor(data)
Example #31
def sthoid(T, rank=None, method='rrqr', compute_core=True):
    """
	Parameters:
	-----------
	T:	(I_1,...,I_d) dtensor
		object of dtensor class. See scikit.dtensor
	
	rank:	(r_1,...,r_d) int array, optional
		Ranks of the individual modes	

	method:	{'rrqr'(default),'dime'} string, optional
		deim uses DEIM method, dime uses pivoted QR, rrqr uses RRQR, 'lev' uses leverage scores approach
		Uses 'rrqr' by default
	
	compute_core: bool, optional
		Compute core tensor by projection. True by default

	Returns:
	--------
	T:	object of Tucker class
		G - core, U - list of mode vectors, I - index list. 
	
	"""

    dim = T.ndim
    assert rank is not None  # eps-based truncation is not implemented here

    #Core tensor and orthonormal factors
    ulst, indlst, modeerr = [], [], []

    G = dtensor(T)
    for d in range(dim):
        mode = G.unfold(d)
        r = rank[d]
        u, _, vh = svdtruncate(mode, r)
        v = vh.conj().T

        if method == 'rrqr':
            _, _, p = srrqr(v.conj().T, r)
            fact = norm(inv(v[p, :]))
        elif method == 'deim':
            p, fact = deim(v)
        elif method == 'dime':
            p, fact = dime(v)
        else:
            raise NotImplementedError

        c = T.unfold(d)[:, p]

        G = G.ttm(pinv(c, rcond=1.e-8), d)
        if d > 0:
            Tk = G.ttm(ulst, mode=list(range(d))).unfold(d)
            _, _, v = lowrank_to_svd(c, Tk.conj().T)
        ulst.append(c)
        indlst.append(p)
        modeerr.append(fact)

    return Tucker(G=G, U=ulst, I=indlst), modeerr
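A hedged usage sketch (assuming the helpers used above, svdtruncate, srrqr, pinv and norm, plus the Tucker class are available; sizes and ranks are illustrative):

import numpy as np

T = dtensor(np.random.rand(20, 20, 20))
Tuck, modeerr = sthoid(T, rank=(5, 5, 5), method='rrqr')
rel_err = (Tuck.G.ttm(Tuck.U) - T).norm() / T.norm()   # relative reconstruction error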
Example #32
def sthoid(T, rank = None,  method = 'rrqr', compute_core = True):
	"""
	Parameters:
	-----------
	T:	(I_1,...,I_d) dtensor
		object of dtensor class. See scikit.dtensor
	
	rank:	(r_1,...,r_d) int array, optional
		Ranks of the individual modes	

	method:	{'rrqr' (default), 'deim', 'dime'} string, optional
		deim uses the DEIM method, dime uses pivoted QR, rrqr uses strong RRQR
		Uses 'rrqr' by default
	
	compute_core: bool, optional
		Compute core tensor by projection. True by default

	Returns:
	--------
	T:	object of Tucker class
		G - core, U - list of mode vectors, I - index list. 
	
	"""

	dim   = T.ndim
	assert rank is not None  # eps-based truncation is not implemented here

	#Core tensor and orthonormal factors
	ulst, indlst, modeerr = [], [], []
	
	G = dtensor(T)
	for d in range(dim):
		mode = G.unfold(d)
		r = rank[d]
		u, _, vh = svdtruncate(mode, r)
		v = vh.conj().T
		
		if method == 'rrqr':
			_, _, p = srrqr(v.conj().T, r)
			fact = norm(inv(v[p,:]))
		elif method == 'deim':
			p, fact = deim(v)
		elif method == 'dime':
			p, fact = dime(v)
		else:
			raise NotImplementedError

		c = T.unfold(d)[:, p]

		G = G.ttm(pinv(c, rcond = 1.e-8), d)		
		if d > 0:
			Tk = G.ttm(ulst, mode = list(range(d))).unfold(d)
			_, _, v = lowrank_to_svd(c, Tk.conj().T)
		ulst.append(c)
		indlst.append(p)
		modeerr.append(fact)
	
	return Tucker(G = G, U = ulst, I = indlst), modeerr
Example #33
def Informationtensor(labels,Jf,Jt,nbclasses,nbinstances):
    labelscondition=np.min(np.unique(labels))
    if(labelscondition==0):
       raise AssertionError("The minimum value of a label must be 1") 
    if(labelscondition!=0):
       Infotensor=np.zeros((nbinstances,Jf,Jt))
       for k in range(nbinstances):
          Infotensor[k,(int(labels[k])-1)*int(Jf/nbclasses):int(labels[k])*int(Jf/nbclasses),(int(labels[k])-1)*int(Jt/nbclasses):int(labels[k])*int(Jt/nbclasses)]=np.ones((int(Jf/nbclasses),int(Jt/nbclasses)))
       return dtensor(Infotensor)
Example #34
def residual(X, ranks, method='hooi', p=None):
    X = st.dtensor(X)
    if method == 'hooi':
        C, Us = st.tucker_hooi(X, ranks, init='nvecs')
    elif method == 'randomized':
        C, Us = randomized_hooi(X, ranks)
    elif method == 'mach':
        C, Us = mach(X, ranks, p)
    else:
        raise NotImplementedError
    return np.sum((X - C.ttm(Us)) ** 2)
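A brief usage sketch (assuming sktensor is imported as st, as in the function above; the shape and ranks are illustrative):

import numpy as np

X = np.random.rand(15, 15, 15)
res = residual(X, ranks=[4, 4, 4], method='hooi')   # squared Frobenius residual of the Tucker fit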
Example #35
def test_cpd_als_sktensor(benchmark):
    for datatype in BACKEND_TYPES:

        _, input_tensor_val = init_rand_cp(dim, size, rank)
        benchmark(sk_cp_als,
                  dtensor(input_tensor_val),
                  rank=rank,
                  max_iter=1,
                  init='random')
Example #36
def main():
    h = 3
    w = 3
    c = 512
    n = 512
    cr = 153
    T = dtensor(np.random.rand(h*w, c, n))
    R = 4
    rank = [9, int(cr/R), int(cr/R)]
    pci(T, R, rank)
Example #37
def decomposition_for_core_retrieving_parallelized(X,U,pool):
    [K,I2,I3]=np.array(X.shape,dtype=int)
    size_factor2=np.array(U[0].shape,dtype=int)
    J2=size_factor2[1]
    size_factor3=np.array(U[1].shape,dtype=int)
    J3=size_factor3[1]
    vecG=np.zeros(K*J2*J3)
    vecG=pool.map(retrieving_a_small_part_of_the_core,[ [X,U,k] for k in range(K)])
    G_result=dtensor(np.reshape(vecG,(K,J2,J3)))
    return G_result
Example #38
def Informationtensor(labels, Jf, Jt, nbclasses, nbinstances):
    Infotensor = np.zeros((nbinstances, Jf, Jt))
    for k in range(nbinstances):
        #pdb.set_trace()
        Infotensor[k, (labels[k] - 1) * int((Jf / nbclasses)):labels[k] *
                   (int(Jf / nbclasses)),
                   (labels[k] - 1) * (int(Jt / nbclasses)):labels[k] *
                   (int(Jt / nbclasses))] = np.ones(
                       (int(Jf / nbclasses), int(Jt / nbclasses)))
    return dtensor(Infotensor)
Example #39
def test2(A, maxr = 10, fname = 'hilbert', ns = 10):
	
	A = dtensor(A)
	Amodes = matricize(A)	
	
	d = A.ndim

	err_deim = np.zeros((maxr,), dtype = 'd')	
	err_dime = np.zeros((maxr,), dtype = 'd')	
	err_rrqr = np.zeros((maxr,), dtype = 'd')	

	deim_mode_err = np.zeros((maxr,d), dtype = 'd')
	dime_mode_err = np.zeros((maxr,d), dtype = 'd')
	rrqr_mode_err = np.zeros((maxr,d), dtype = 'd')
		
	for r in np.arange(maxr):
		#Approximate the matrix using HOSVD
		rank = tuple((r+1)*np.ones((d,), dtype = 'i'))
		H = hosvd(A, rank, method = 'svd')
		err_deim[r], mode = lowrank_to_id(A, H, rank, method = 'deim')
		deim_mode_err[r,:] = mode
		err_dime[r], mode = lowrank_to_id(A, H, rank, method = 'dime')
		dime_mode_err[r,:] = mode
		err_rrqr[r], mode = lowrank_to_id(A, H, rank, method = 'rrqr')
		rrqr_mode_err[r,:] = mode


	plt.figure()
	plt.semilogy(np.arange(maxr) + 1, np.max(deim_mode_err,axis=1) , \
			'k-', linewidth = 2., label = r'DEIM') 
	plt.semilogy(np.arange(maxr) + 1, np.max(dime_mode_err,axis=1) , \
			'c-', linewidth = 2., label = r'PQR') 
	plt.semilogy(np.arange(maxr) + 1, np.max(rrqr_mode_err,axis=1) , \
			'r--', linewidth = 2., label = r'RRQR') 
	plt.xlabel('Rank [r]', fontsize = 18.)
	plt.ylabel(r'max. $||(P^*V)^{-1}||_F$' , fontsize = 18.)

	plt.title('Error constants', fontsize = 24)
	plt.legend(loc = 4)		
	plt.savefig('figs/deim_err_' + fname + '.png')


	plt.figure()
	
	plt.semilogy(np.arange(maxr)+1, err_deim, 'k-', linewidth = 3.)
	plt.semilogy(np.arange(maxr)+1, err_dime, 'c-', linewidth = 2.)
	plt.semilogy(np.arange(maxr)+1, err_rrqr, 'r--', linewidth = 2.)
	plt.legend(('DEIM', 'PQR', 'RRQR'), loc = 3)
	plt.xlabel('Rank [r]', fontsize = 18.)
	plt.ylabel('rel. err.' , fontsize = 18.)
	plt.title('Relative Error', fontsize = 24)
	plt.savefig('figs/deim_' + fname + '.png')	

	return	
Example #40
def preprocess(X):
    """Preprocesses input data tensor.

    If data is sparse, returns an int sptensor.
    Otherwise, returns an int dtensor.
    """
    if not np.issubdtype(X.dtype, int):
        X = X.astype(int)
    if isinstance(X, np.ndarray) and is_sparse(X):
        X = sptensor_from_dense_array(X)
    else:
        X = skt.dtensor(X)
    return X
Example #41
def load_data():
    all_subjects = range(1, 24)

    X = []
    y = []
    ids_test = []
    label_count = []

    n_basis = 10
    all_U0 = []
    all_U2 = []
    for n, subject in enumerate(all_subjects):
        if subject < 17:
            filename = 'data/train_subject%02d.mat' % subject
        else:
            filename = 'data/test_subject%02d.mat' % subject
        print("Loading", filename)
        data = loadmat(filename, squeeze_me=True)
        XX = window_filter_baseline(data['X'])
        mask = get_outlier_mask(XX)
        T = dtensor(XX)
        r = cp_als(T, rank=n_basis)
        U0 = r[0].U[0]
        U1 = r[0].U[1]
        U2 = r[0].U[2]
        X.append(XX)
        all_U0.append(U0)
        all_U2.append(U2)
        if subject < 17:
            yy = data['y'].ravel()
            y.append(yy)
        else:
            ids = data['Id']
            ids_test.append(ids)
        label_count += [subject] * len(XX)

    all_U0 = np.vstack(all_U0)
    all_U2 = np.vstack(all_U2)
    X = np.vstack(X)
    y = np.concatenate(y)

    cv = StratifiedShuffleSplit(yy, n_iter=50, test_size=.1)
    selection_pipe = Pipeline([('scaler', StandardScaler()),
                               ('estimator', LassoLarsCV(cv=cv))])
    selection_pipe.fit(all_U0[:y.shape[0]], y * 2 - 1)
    weights = selection_pipe.steps[1][1].coef_
    selected = np.where(weights != 0)[0]
    proj = all_U2[:, selected].T
    ids_test = np.concatenate(ids_test)
    from IPython import embed; embed()
    return np.dot(X, proj), y, ids_test, label_count
Example #42
def approx_CP_R(value, R):
    if value.ndim < 2:
        return value
    T = dtensor(value)
    P, fit, itr, exetimes = cp_als(T, R, init='random')
    Y = None
    for i in range(R):
        y = P.lmbda[i]
        o = None
        for l in range(T.ndim):
            o = P.U[l][:,i] if o is None else np.outer(o, P.U[l][:,i])
        y = y * o
        Y = y if Y is None else Y+y
    return Y
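A hedged usage sketch (assuming numpy and sktensor's dtensor/cp_als are imported; the shape and rank are illustrative):

import numpy as np

W = np.random.rand(8, 8, 3, 3)
Y = approx_CP_R(W, 4)         # sum of R weighted outer products
W4 = Y.reshape(W.shape)       # reshaped back to the original tensor shape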
Example #43
def example2b(n = 20, opt = 1):

	#Generate Tucker factorization
	T = generate_tensor(n = n, opt = opt)
	modes = T.matricize()
	
	#Generate full tensor
	A = dtensor(np.reshape(np.array(modes[0]),(n,n,n)))
	
	#Plot Error using DEIM and DIME and leverage score
	test3(A, maxr = 15, fname = 'sparse_10', ns = 15) 
	test3(A, maxr = 15, fname = 'sparse_all', ns = -1) 

	return
Example #44
def nucnorm(x0, rho, gamma, mode=None):
    """
    Proximal operator for the nuclear norm (sum of the singular values of a matrix)

    Parameters
    ----------
    x0 : array_like
        The starting or initial point used in the proximal update step

    rho : float
        Momentum parameter for the proximal step (larger value -> stays closer to x0)

    gamma : float
        A constant that weights how strongly to enforce the constraint

    mode : int, optional
        If None (default), then the input is treated as a numpy array. If an integer, it is
        treated as a tensor object and the nuclear norm is applied to an unfolding of
        the tensor (using the array axis given by mode).

    Returns
    -------
    theta : array_like
        The parameter vector found after running the proximal update step

    """

    # if tensor, generate unfolded array
    if mode is not None:
        x_temp = dtensor(x0).unfold(mode)
    else:
        x_temp = x0

    # compute SVD
    u, s, v = np.linalg.svd(x_temp, full_matrices=False)

    # soft threshold the singular values
    sthr = np.maximum(s - (gamma / float(rho)), 0)

    # reconstruct
    x_out = (u.dot(np.diag(sthr)).dot(v))

    # if tensor, copy the result back into the unfolded tensor and refold
    # (the SVD reconstruction above is a plain ndarray, which has no fold())
    if mode is not None:
        tmp = x_temp.copy()
        tmp[:, :] = x_out[:, :]
        x_out = tmp.fold()

    return x_out
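A small usage sketch of the matrix case (mode=None), assuming numpy is imported; the values are illustrative:

import numpy as np

x0 = np.random.randn(50, 40)
theta = nucnorm(x0, rho=1.0, gamma=0.5)   # singular values soft-thresholded by gamma/rho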
Example #45
def example2(n = 20, opt = 1, ns = 10, visualize = False):

	#Generate Tucker factorization
	T = generate_tensor(n = n, opt = opt)
	modes = T.matricize(method = 'kron')
	
	#Generate full tensor
	A = dtensor(np.reshape(np.array(modes[0]),(n,n,n)))
	
	#Plot error using HOSVD, HORSVD, HOID
	test1(A, maxr = 15, fname = 'sparse')
	#Plot Error using DEIM and DIME and leverage score
	test2(A, maxr = 15, fname = 'sparse', ns = ns) 

	if visualize: vis_ind(A, fname = 'sparse')

	return
Example #46
def test3(A, maxr = 10, fname = 'hilbert', ns = 10):
	
	A = dtensor(A)
	Amodes = matricize(A)	
	
	H = hosvd(A, rank = A.shape, method = 'svd')
	err_lev, lev_mode_err = leverage_accuracy(A, H, maxr, ns = ns)	

	plt.figure()
	plt.semilogy(np.arange(maxr) + 1, lev_mode_err, linewidth = 2.) 
	plt.xlabel('Rank [r]', fontsize = 18.)
	plt.ylabel(r'$||(P^*V)^{-1}||_F$' , fontsize = 18.)
	plt.legend(('Mode 1', 'Mode 2', 'Mode 3'))
	plt.title('Error constants - Leverage scores ', fontsize = 20)
	plt.savefig('figs/lev_err_' + fname + '.png')


	return	
Example #47
def prepare_models(LAYER, R, NET_PATH, NET_NAME, INPUT_DIM):
    PATH = NET_PATH
    NET_PREFIX = PATH + NET_NAME
    input_dim = INPUT_DIM
    
    model = load_model(NET_PREFIX + '.prototxt')
    ind = find_layer_by_name(model, LAYER)
    new_model = accelerate_model(model, ind, R)
    save_model(new_model, NET_PREFIX + '_accelerated.prototxt')
    new_deploy = create_deploy_model(new_model, input_dim)
    save_model(new_deploy, NET_PREFIX + '_accelerated_deploy.prototxt')
    deploy = create_deploy_model(model, input_dim)
    save_model(deploy, NET_PREFIX + '_deploy.prototxt')

    net = caffe.Classifier(NET_PREFIX + '_deploy.prototxt', NET_PREFIX + '.caffemodel')
    fast_net = caffe.Classifier(NET_PREFIX + '_accelerated_deploy.prototxt', NET_PREFIX + '.caffemodel')

    l = ind - 1  # layer index in deploy version
    weights = net.layers[l].blobs[0].data
    bias = net.layers[l].blobs[1]

    T = dtensor(weights)
    P, fit, itr, exectimes = cp_als(T, R, init='random')
    f_x = (np.array(P.U[3])*(P.lmbda)).T
    f_y = np.array(P.U[2]).T
    f_c = np.array(P.U[1]).T
    f_n = np.array(P.U[0]) 

    n = model.layer[ind].convolution_param.num_output # OUTPUT
    d = model.layer[ind].convolution_param.kernel_size[0] # KERNEL SIZE
    c = weights.shape[1] # INPUT 
    
    f_c = np.reshape(f_c, [R, c, 1, 1]) # 1
    f_y = np.reshape(f_y, [R, 1, d, 1]) # 2
    f_x = np.reshape(f_x, [R, 1, 1, d]) # 3
    f_n = np.reshape(f_n, [n, R, 1, 1]) # 4 

    np.copyto(fast_net.layers[l].blobs[0].data, f_c)
    np.copyto(fast_net.layers[l+1].blobs[0].data, f_y)
    np.copyto(fast_net.layers[l+2].blobs[0].data, f_x)
    np.copyto(fast_net.layers[l+3].blobs[0].data, f_n)
    np.copyto(fast_net.layers[l+3].blobs[1].data, bias.data)

    fast_net.save(NET_PREFIX + '_accelerated.caffemodel')
Example #48
def sthosvd(T, rank = None, eps = None, method = 'svd'):
	"""
	Parameters:
	-----------
	T:	(I_1,...,I_d) dtensor
		object of dtensor class. See scikit.dtensor
		
	rank:	(r_1,...,r_d) int array, optional
		Ranks of the individual modes	

	eps:	float, optional	
		Relative error of the representation

	method:	{'svd','randsvd'} string 
		SVD uses numpy SVD, RandSVD uses scipy.linalg.interpolative
	
	Returns:
	--------
	T:	object of Tucker class
		G - core, U - list of mode vectors. Returned if compute_core = True


	"""
	
	dim   = T.ndim

	#Core tensor and orthonormal factors
	ulst = []
	G = dtensor(T)
	for d in range(dim):
		mode = G.unfold(d)
		eps_or_k = rank[d] if rank is not None else eps
		if method == 'svd':
			u, _, _ = svdtruncate(mode, eps_or_k)	
		elif method == 'randsvd':
			u, _, _ = randsvd(mode, eps_or_k)
		else:
			raise NotImplementedError
		ulst.append(u)
		
		#Recompute the core tensor (slightly more expensive)
		G = G.ttm(u, d, transp = True) 
		
	return Tucker(G = G, U = ulst) 
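A minimal usage sketch (assuming dtensor, svdtruncate and the Tucker class are available as above; sizes and ranks are illustrative):

import numpy as np

T = dtensor(np.random.rand(20, 20, 20))
Tuck = sthosvd(T, rank=(5, 5, 5), method='svd')
rel_err = (Tuck.G.ttm(Tuck.U) - T).norm() / T.norm()   # relative reconstruction error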
Example #49
def smooth(x0, rho, gamma, mode=None):
    """
    Proximal operator for a smoothing function enforced via the discrete laplacian operator

    Parameters
    ----------
    x0 : array_like
        The starting or initial point used in the proximal update step

    rho : float
        Momentum parameter for the proximal step (larger value -> stays closer to x0)

    gamma : float
        A constant that weights how strongly to enforce the constraint

    mode : int, optional
        If None (default), then the input is treated as a numpy array. If an integer, it is
        treated as a tensor object and the smoothing is applied to the columns of an unfolding of
        the tensor (using the array axis given by mode).

    Returns
    -------
    theta : array_like
        The parameter vector found after running the proximal update step

    """

    # if tensor, generate unfolded array
    if mode is not None:
        x_temp = dtensor(x0).unfold(mode)
    else:
        x_temp = x0

    # Apply Laplacian smoothing
    n = x_temp.shape[0]
    lap_op = spdiags([(2 + rho / gamma) * np.ones(n), -1 * np.ones(n), -1 * np.ones(n)], [0, -1, 1], n, n, format='csc')
    x_out = spsolve(gamma * lap_op, rho * x_temp)

    if mode is not None:
        tmp = x_temp.copy()
        tmp[:, :] = x_out[:, :]
        x_out = tmp.fold()

    return x_out
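A small usage sketch of the vector case (mode=None), assuming numpy and the scipy.sparse imports used above; the values are illustrative:

import numpy as np

x0 = np.random.randn(100)
theta = smooth(x0, rho=1.0, gamma=10.0)   # larger gamma enforces a smoother signal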
Example #50
def cp_conv(weights, img, iter, w_shp, size, N_i, N_j, rank):

    # Define parameters
    (F, C, X, Y) = w_shp
    (N, C, H, W) = size

    # Instantiate 4D tensor for input
    input = T.tensor4(name='input')

    # Initialize shared variable for weights.
    weights = weights.eval()
    W_tensor =  dtensor(weights)

    # Apply CP-Decomposition on the clustered weight tensor
    P, fit, itr, exectimes = cp_als(W_tensor, rank, init='random')

    output = None
    for k in range(rank):

        T_F = theano.shared(np.reshape(P.U[0][:,k], (F, 1, 1, 1)), name='F_{0}'.format(k))
        T_C = theano.shared(np.reshape(P.U[1][:,k], (1, C, 1, 1)), name='C_{0}'.format(k))
        T_X = theano.shared(np.reshape(P.U[2][:,k], (1, 1, X, 1)), name='X_{0}'.format(k))
        T_Y = theano.shared(np.reshape(P.U[3][:,k], (1, 1, 1, Y)), name='Y_{0}'.format(k))

        # Apply convolution on each dimension individually
        conv_C = conv.conv2d(input, T_C)
        conv_X = conv.conv2d(conv_C, T_X)
        conv_Y = conv.conv2d(conv_X, T_Y)
        conv_F = conv.conv2d(conv_Y, T_F)

        # truth-testing a symbolic Theano variable raises, so compare with None
        output = output + conv_F if output is not None else conv_F

    # Map Theano function
    f = theano.function([input], output, profile=False)

    # Execute Theano function
    times = []
    for i in range(iter):
        start = time.time()
        filtered_img = f(img)
        done = time.time()
        times.append(done-start)
    avg1 = np.mean(times)
    return filtered_img, avg1
Example #51
def preprocess(X):
    """Preprocesses input dense data tensor.

    If data is sparse, returns an int sptensor.
    Otherwise, returns an int dtensor.
    """
    if isinstance(X, skt.sptensor):
        if not np.issubdtype(X.dtype, int):
            X.vals = X.vals.astype(int)
            X.dtype = int
        return X
    else:
        if not np.issubdtype(X.dtype, int):
            X = X.astype(int)
        if is_sparse(X):
            return sptensor_from_dense_array(X)
        else:
            if not isinstance(X, skt.dtensor):
                return skt.dtensor(X)
            return X
Example #52
def cp_decomposition(weights, bias, rank):

    # Define parameters
    (F, C, X, Y) = weights.shape

    # Initialize shared variable for weights.
    W_tensor =  dtensor(weights)

    # Apply CP-Decomposition on the clustered weight tensor
    P, fit, itr, exectimes = cp_als(W_tensor, rank, init='random')

    output = []
    for k in range(rank):

        T_F = np.reshape(P.U[0][:,k], (F, 1, 1, 1))
        T_C = np.reshape(P.U[1][:,k], (1, C, 1, 1))
        T_X = np.reshape(P.U[2][:,k], (1, 1, X, 1))
        T_Y = np.reshape(P.U[3][:,k], (1, 1, 1, Y))
        output.append([T_C, T_X, T_Y, T_F, bias])
    return output
Example #53
def get_basis(labels, data, n_components):
    # Generate decompositions
    print("Performing tensor decomposition of training data.")
    all_basis = []
    for n in np.unique(labels):
        idx = np.where(labels == n)[0]
        X = data[idx]
        grad1, grad2, mag1 = separate_grad_mag(X)
        grad = np.concatenate((grad1, grad2))
        # Magnetometers look real rough
        for idx in [grad, mag1]:
            Xi = X[:, idx, :]
            r = cp_als(dtensor(Xi), n_components, init="nvecs")
            r_good_idx = drop_fifty_and_ten_hz(r[0].U[2])
            basis = r[0].U[2][:, r_good_idx]
            all_basis.append(basis)

    basis = np.hstack(all_basis)
    del all_basis
    return basis
Example #54
def test_dense_fold(T):
    X = dtensor(T)
    I, J, K = T.shape
    X1 = X[:, :, 0]
    X2 = X[:, :, 1]

    U = X.unfold(0)
    assert (3, 8) == U.shape
    for j in range(J):
        assert (U[:, j] == X1[:, j]).all()
        assert (U[:, j + J] == X2[:, j]).all()

    U = X.unfold(1)
    assert (4, 6) == U.shape
    for i in range(I):
        assert (U[:, i] == X1[i, :]).all()
        assert (U[:, i + I] == X2[i, :]).all()

    U = X.unfold(2)
    assert (2, 12) == U.shape
    for k in range(U.shape[1]):
        assert (U[:, k] == array([X1.flatten('F')[k], X2.flatten('F')[k]])).all()
Example #55
def get_tensor(src='metro', daytype='workdays', n_component=5):
	lnglat_dict = get_lnglat_dict(src=src)
	travel = {}
	test = {}
	locations = {ind: name for name, (ind, _, _) in lnglat_dict.iteritems()}
	for line in gzip.open('../../data/statistics/intermediate/MAE_{0}_{1}.txt.gz'.format(src,daytype)):
		_time, _src, _dst, _len, _mean = line.split()[:5]
		_lng_src, _lat_src = lnglat_dict[_src][1:]
		_lng_dst, _lat_dst = lnglat_dict[_dst][1:]
		travel[_src] = travel.get(_src, {})
		travel[_src][_dst] = travel[_src].get(_dst, [0]*24)
		travel[_src][_dst][int(_time)] = float(_mean)
		test[_src] = test[_dst] = 1
	tensor = [[[travel.get(locations[i],{}).get(locations[j],[0]*24)[k]\
				 for k in xrange(24)] for j in xrange(len(locations))] for i in xrange(len(locations))]
	np.save('../../data/tensor_data3/tensor_{0}_{1}.npy'.format(src,daytype),np.array(tensor))
	tensor = [[[100.0*tensor[i][j][k]/sum(tensor[i][j]) if sum(tensor[i][j])!=0 else 0 \
				for k in range(24)] for j in range(len(tensor[0]))] for i in range(len(tensor))]
	np.save('../../data/tensor_data3/tensor_normalized_{0}_{1}.npy'.format(src,daytype),np.array(tensor))
	T = dtensor(tensor)
	# tensor = np.array(tensor); approx = nonnegative_tensor_factorization(T, n_component, method='anls_asgroup', min_iter=20, max_iter=50).totensor()
	# print abs(tensor-approx).sum()/tensor.sum()
	matrix_src, matrix_dst, matrix_time = nonnegative_tensor_factorization(T, n_component, method='anls_asgroup', min_iter=20, max_iter=50).U
	matrix_time = np.transpose(matrix_time)
	matrix_file = '../../data/tensor_data3/matrix_{0}_{1}/{2}.npy'.format(src,daytype,'{0}')
	for _matrix, _name in [(matrix_src, 'src'), (matrix_dst, 'dst'), (matrix_time, 'time')]:
		np.save(matrix_file.format(_name), _matrix)
Example #56
	def lowrank_matricize(self, method = 'modemult'):

		G, U = self.G, self.U
		ndim = self.ndim	
		vlst = []
		if method == 'kron':
			modes = matricize(dtensor(G))
			for d, mode in zip(range(ndim),modes):
				lst = [U[i] for i in (range(d) + range(d+1, ndim))] 
				v = dot(mode, kron(lst, reverse = True).conj().T)
				vlst.append(v)
		elif method == 'modemult':
			
			for d in range(ndim):
				lst = [U[i] for i in (range(d) + range(d+1, ndim))] 
				v = G.ttm(lst, mode = list(range(d) + range(d+1, ndim))).unfold(d)
				vlst.append(v)
		else: 
			raise NotImplementedError	


		return U, vlst	
Example #57
def test_dense_fold():
    X = dtensor(T)
    I, J, K = T.shape
    X1 = X[:, :, 0]
    X2 = X[:, :, 1]

    U = X.unfold(0)
    assert_equal((3, 8), U.shape)
    for j in range(J):
        assert_true((U[:, j] == X1[:, j]).all())
        assert_true((U[:, j + J] == X2[:, j]).all())

    U = X.unfold(1)
    assert_equal((4, 6), U.shape)
    for i in range(I):
        assert_true((U[:, i] == X1[i, :]).all())
        assert_true((U[:, i + I] == X2[i, :]).all())

    U = X.unfold(2)
    assert_equal((2, 12), U.shape)
    for k in range(U.shape[1]):
        assert_true((U[:, k] == array([X1.flatten("F")[k], X2.flatten("F")[k]])).all())
Example #58
def Gradient_Tensor_Decomposition(A, K, steps=500, alpha=0.0002, beta=0.02, epsilon=1):
	# Initialization
	len_X, len_Y, len_Z = A.shape
	print len_X, len_Y, len_Z
	C = dtensor(np.random.rand(K,K,K))
	X = np.random.rand(len_X,K)
	Y = np.random.rand(len_Y,K)
	Z = np.random.rand(len_Z,K) 
	# Iteration
	R = C.ttm(X, 0).ttm(Y, 1).ttm(Z, 2)
	loss_last, loss_curr, step = float('inf'), compute_loss(A,R), 0
	for step in xrange(steps):
		print step
		for dim1 in xrange(len_X):
			print dim1
			for dim2 in xrange(len_Y):
				for dim3 in xrange(len_Z):
					loss_last = loss_curr
					_X, _Y, _Z, _C = np.copy(X), np.copy(Y), np.copy(Z), copy.deepcopy(C)
					a = A[dim1][dim2][dim3]
					r = C.ttm(np.array([X[dim1]]), 0)\
						 .ttm(np.array([Y[dim2]]), 1)\
						 .ttm(np.array([Z[dim3]]), 2)[0][0][0]
					Xi, Yi, Zi = np.array([X[dim1]]), np.array([Y[dim2]]), np.array([Z[dim3]])
					X[dim1] = Xi-alpha*(r-a)*C.ttm(Yi, 1).ttm(Zi, 2).reshape((1,K))-alpha*beta*Xi
					Y[dim2] = Yi-alpha*(r-a)*C.ttm(Xi, 0).ttm(Zi, 2).reshape((1,K))-alpha*beta*Yi
					Z[dim3] = Zi-alpha*(r-a)*C.ttm(Xi, 0).ttm(Yi, 1).reshape((1,K))-alpha*beta*Zi
					C = C-alpha*(r-a)*np.kron(np.kron(Xi,Yi),Zi).reshape((K, K, K))-alpha*beta*C
					R = C.ttm(X, 0).ttm(Y, 1).ttm(Z, 2)
					loss_curr = compute_loss(A,R)
					if loss_curr > loss_last:
						X, Y, Z, C = _X, _Y, _Z, _C
						loss_curr = loss_last
			print "loss_curr", loss_curr
		if loss_curr < epsilon:
			return C, [X, Y, Z]
	return C, [X, Y, Z]
Example #59
i_list = pd.unique(X.iloc[:, 0])
j_list = pd.unique(X.iloc[:, 1])
k_list = pd.unique(X.iloc[:, 2])

i_list.sort()
j_list.sort()
k_list.sort()


# ### Creating and filling the tensor

# In[ ]:

T = np.zeros((len(i_list), len(j_list), len(k_list)))
T = dtensor(T)


# In[ ]:

# Going through the dataframe
for i, j, k in zip(X.sender, X.receiver, X.datetime):
    # Finding the corresponding index in the tensor
    i_ind = int(np.where(i_list == i)[0])
    j_ind = int(np.where(j_list == j)[0])
    k_ind = int(np.where(k_list == k)[0])
    
    # Modifying the tensor value for the tuple (i_ind, j_ind, k_ind)
    T[i_ind, j_ind, k_ind] += 1

# Logarithmic Transformation