Example #1
 def _rank_reduce(weights, rank_ratio):
     # Do not include the kernel dimensions
     eigen_length = min(weights.shape[:2])
     target_rank = [int(eigen_length * rank_ratio)]*2
     core, factors = partial_tucker(
         weights, modes=[0, 1], init="svd", svd="truncated_svd", rank=target_rank)
     return tensorly.tucker_to_tensor((core, factors))
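A minimal usage sketch for the helper above, treating the method-style snippet as a standalone function (the kernel shape is hypothetical, and the exact `partial_tucker` signature varies across tensorly releases):

import numpy as np

# hypothetical 4-D convolution kernel: (out_channels, in_channels, kH, kW)
kernel = np.random.rand(64, 32, 3, 3)
# keep half of the channel rank on modes 0 and 1; the kernel
# dimensions (kH, kW) are not decomposed
approx = _rank_reduce(kernel, rank_ratio=0.5)
assert approx.shape == kernel.shape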
Example #2
    def recover(self):
        '''
        Recover the tensor X_hat, together with its core and arm tensors, from
        the sketches using the one-pass sketching algorithm.
        '''

        if not self.phis:
            phis = self.get_phis()
        else:
            phis = self.phis
        Qs = []
        for arm_sketch in self.arm_sketches:
            Q, _ = np.linalg.qr(arm_sketch)
            Qs.append(Q)
        self.core_tensor = self.core_sketch
        dim = len(self.tensor_shape)
        for mode_n in range(dim):
            self.core_tensor = tl.tenalg.mode_dot(self.core_tensor, \
                np.linalg.pinv(np.dot(phis[mode_n], Qs[mode_n])), mode=mode_n)
        core_tensor, factors = tucker(self.core_tensor, rank=self.ranks)
        self.core_tensor = core_tensor
        for n in range(dim):
            self.arms.append(np.dot(Qs[n], factors[n]))
        X_hat = tl.tucker_to_tensor((self.core_tensor, self.arms))
        return X_hat, self.arms, self.core_tensor
Example #3
def tucker_on_error_tensor(error_tensor, ranks=[15, 4, 2, 2, 8, 15], save_results=False, verbose=False):
    
    tensor_pred = np.nan_to_num(error_tensor)
    tensor_from_fac = np.zeros(error_tensor.shape)
    errors = []
    num_iterations = 0
    Ω = get_omega(error_tensor)

    # while(not stopping_condition(tensor, tensor_from_fac, threshold)):
    while(len(errors) <= 2 or errors[-1] < errors[-2] - 0.01):
        
        num_iterations += 1
        core, factors = tucker(tensor_pred, rank=ranks)
        tensor_from_fac = tucker_to_tensor((core, factors))
        error = np.linalg.norm(np.multiply(Ω, np.nan_to_num(error_tensor - tensor_from_fac)))
        
        if verbose:
            if not num_iterations % 5:
                print("ranks: {}, iteration {}, error: {}".format(ranks, num_iterations, error))

        errors.append(error)
        # re-impute the unobserved entries with the current low-rank reconstruction
        tensor_pred = np.nan_to_num(error_tensor) + np.multiply(1 - Ω, tensor_from_fac)
    
    core, factors = tucker(tensor_pred, rank=ranks)
    
    if save_results:
        np.save(os.path.join(defaults_path, 'error_tensor_imputed.npy'), tensor_pred)
    return core, factors, tensor_pred, errors
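A hedged driver sketch for `tucker_on_error_tensor`, assuming the snippet's `tucker`/`tucker_to_tensor` imports: `get_omega` and `defaults_path` are not shown in the snippet, so here `get_omega` is assumed to return a 0/1 indicator of the observed (non-NaN) entries:

import numpy as np

def get_omega(T):
    # assumed helper: 1 where an entry is observed, 0 where it is NaN
    return (~np.isnan(T)).astype(float)

T = np.random.rand(30, 4, 2, 2, 8, 30)
T[np.random.rand(*T.shape) < 0.2] = np.nan  # hide 20% of the entries
core, factors, T_imputed, errs = tucker_on_error_tensor(T, ranks=[5, 2, 1, 1, 4, 5])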
Example #4
    def recover(self):
        '''
        Recover the tensor X_hat, together with its core and arm tensors, from
        the sketches using the two-pass sketching algorithm.
        '''

        # get orthogonal basis for each arm
        Qs = []
        for sketch in self.arm_sketches:
            Q, _ = np.linalg.qr(sketch)
            Qs.append(Q)

        #get the core_(smaller) to implement tucker
        core_tensor = self.X
        N = len(self.X.shape)
        for mode_n in range(N):
            Q = Qs[mode_n]
            core_tensor = tl.tenalg.mode_dot(core_tensor, Q.T, mode=mode_n)
        core_tensor, factors = tucker(core_tensor, rank=self.ranks)
        self.core_tensor = core_tensor

        #arm[n] = Q.T*factors[n]
        for n in range(len(factors)):
            self.arms.append(np.dot(Qs[n], factors[n]))
        X_hat = tl.tucker_to_tensor((self.core_tensor, self.arms))
        return X_hat, self.arms, self.core_tensor
Example #5
 def factors_to_tensors(self):
     # factors_ = [self.factors[0][:, self.temporal_component].reshape(-1, 1), self.factors[1], self.factors[2]]
     # use a plain list of factor matrices: they have different shapes,
     # so they cannot be stacked into a single tl.tensor
     n_comp = self.temporal_component * 5 + 1
     factors_ = [self.factors[0][:, :n_comp], self.factors[1], self.factors[2]]
     print([f.shape for f in factors_])
     print(self.core.shape)
     self.tensor_data = tucker_to_tensor((self.core[:n_comp, :, :].reshape(-1, 50, 50), factors_))
     # rescale each frame so its maximum is roughly 300
     for j in range(self.tensor_data.shape[0]):
         self.tensor_data[j, :, :] *= 300.0 / (self.tensor_data[j, :, :].max() + 1e-5)
Example #6
 def forward(self, x):
     output, (h_n, c_n) = self.lstm(x)
     # final hidden state of the last LSTM layer, shape (batch, hidden_size)
     output_last_timestep = h_n[-1, :, :]
     # use it as the mode-0 factor in the Tucker reconstruction
     out = tl.tucker_to_tensor((self.core.to(self.device), [
         output_last_timestep, self.factors[1].to(self.device),
         self.factors[2].to(self.device)
     ]))
     return out
Example #7
def reconstruct_tensor(num):
    core, U, I, A = get_re_tensor_element(num)
    factors = [U, I, A]
    TensorX_approximation = tensorly.tucker_to_tensor((core, factors))
    return TensorX_approximation
Example #8
    def forward(self, x):
        """Combine the core, factors and bias, with input x to produce the (1D) output
        """

        regression_weights = tl.tucker_to_tensor((self.core, self.factors))

        #return inner(x,regression_weights,tl.ndim(x)-1)+self.bias
        return regression_weights, (
            tl.tenalg.contract(x, [1, 2, 3], regression_weights, [0, 1, 2]) +
            self.bias)
Example #9
def rank_search_tucker(tensor, rank_range):
    AIC = {}
    for rank in combinations_with_replacement(range(1, rank_range + 1), 3):
        decomp = tucker(tensor, rank)
        recon = tl.tucker_to_tensor(decomp)
        err = tensor - recon
        rank_AIC = 2 * tl.tenalg.inner(err, err) + 2 * sum(rank)
        AIC[rank] = rank_AIC

    return AIC
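A short usage sketch on hypothetical data (assumes `tucker`, `tl`, and `combinations_with_replacement` are imported as the snippet requires); the returned dictionary can be scanned for the rank triple with the lowest AIC:

import numpy as np
import tensorly as tl

tensor = tl.tensor(np.random.rand(8, 8, 8))
aic = rank_search_tucker(tensor, rank_range=4)
best_rank = min(aic, key=aic.get)  # rank triple with the lowest AIC score
print(best_rank, aic[best_rank])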
Example #10
 def ho_svd(self):
     # square_tensor_gen returns the (noisy, noise-free) pair; keep the noisy tensor
     X, _ = square_tensor_gen(self.n,
                           self.rank,
                           dim=self.dim,
                           typ=self.gen_typ,
                           noise_level=self.noise_level)
     start_time = time.time()
     core, tucker_factors = tucker(
         X, rank=[self.rank for _ in range(self.dim)], init='random')
     X_hat = tl.tucker_to_tensor((core, tucker_factors))
     running_time = time.time() - start_time
     rerr = eval_mse(X, X_hat)
     return (-1, running_time), rerr
Example #11
def tucker_decomposition(X):
    N,C,H,W = X.shape
    rank = 4
    tucker_rank = [C,rank,rank]
    Tucker_reconstructions = torch.zeros_like(X).cpu()
    Cores = torch.zeros([N,C,rank,rank])
    for j, img in enumerate(X):
        core, tucker_factors = tucker(img, rank=tucker_rank, init='random',
                                      tol=1e-4, random_state=np.random.RandomState())
        tucker_reconstruction = tl.tucker_to_tensor((core, tucker_factors))
        Tucker_reconstructions[j] = tucker_reconstruction
        Cores[j] = core

    return Cores
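A usage sketch with a hypothetical image batch, assuming the PyTorch backend is active (the snippet mixes torch tensors with tensorly calls):

import torch
import tensorly as tl

tl.set_backend('pytorch')

imgs = torch.rand(8, 3, 32, 32)  # hypothetical batch of 8 RGB 32x32 images
cores = tucker_decomposition(imgs)
print(cores.shape)  # torch.Size([8, 3, 4, 4])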
Example #12
def test_decompose_test():
	tr = tl.tensor(np.arange(24).reshape(3,4,2))
	print(tr)
	unfolded = tl.unfold(tr, mode=0)
	tl.fold(unfolded, mode=0, shape=tr.shape)

	#Apply Tucker decomposition
	core, factors = tucker(tr, rank=[3,4,2])
	print ("Core")
	print (core)
	print ("Factors")
	print (factors)

	print(tl.tucker_to_tensor(core, factors))
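Since the requested rank [3, 4, 2] equals the full shape of `tr`, the reconstruction should reproduce the input up to floating-point error; a quick sanity check in the same vein (NumPy backend assumed):

import numpy as np
import tensorly as tl
from tensorly.decomposition import tucker

tr = tl.tensor(np.arange(24).reshape(3, 4, 2))
core, factors = tucker(tr, rank=[3, 4, 2])
assert np.allclose(tl.tucker_to_tensor((core, factors)), tr)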
Example #13
    def f(self, x):
        x1 = x[:, 0]
        x2 = x[:, 1]

        ranks = [int(x1), int(x2)]

        core, [last, first] = partial_tucker(conv.weight.data.cpu().numpy(),
                                             modes=[0, 1],
                                             rank=ranks,
                                             init="svd")

        recon_error = tl.norm(
            conv.weight.data.cpu().numpy() - tl.tucker_to_tensor(
                (core, [last, first])),
            2,
        ) / tl.norm(conv.weight.data.cpu().numpy(), 2)

        # recon_error = np.nan_to_num(recon_error)

        ori_out = conv.weight.data.shape[0]
        ori_in = conv.weight.data.shape[1]
        ori_ker = conv.weight.data.shape[2]
        ori_ker2 = conv.weight.data.shape[3]

        first_out = first.shape[0]
        first_in = first.shape[1]

        core_out = core.shape[0]
        core_in = core.shape[1]

        last_out = last.shape[0]
        last_in = last.shape[1]

        original_computation = ori_out * ori_in * ori_ker * ori_ker2
        decomposed_computation = ((first_out * first_in) +
                                  (core_in * core_out * ori_ker * ori_ker2) +
                                  (last_in * last_out))

        computation_error = decomposed_computation / original_computation

        if computation_error > 1.0:
            computation_error = 5.0

        Error = float(recon_error + computation_error)

        print("%d, %d, %f, %f, %f" %
              (x1, x2, recon_error, computation_error, Error))

        return Error
Example #14
def tucker_filter(imgarray, tucker_rank, compfilename):
    """
    imgarray is a 3-D numpy array holding a PNG image
    """
    imgtensor = tl.tensor(imgarray)

    # Tucker decomposition
    core, tucker_factors = tucker(imgtensor,
                                  rank=tucker_rank,
                                  init='random',
                                  tol=10e-5,
                                  random_state=random_state)
    tucker_reconstruction = tl.tucker_to_tensor((core, tucker_factors))
    Image.fromarray(to_image(tucker_reconstruction)).save('../input/' +
                                                          compfilename)
    return 0
Example #15
    def forward(ctx, imgs):
        # Tucker_reconstructions = np.zeros_like(imgs)
        Tucker_reconstructions = torch.zeros_like(imgs).cpu()
        tucker_rank = [3, rank, rank]
        for j, img in enumerate(imgs):
            core, tucker_factors = tucker(img,
                                          rank=tucker_rank,
                                          init='random',
                                          tol=1e-4,
                                          random_state=np.random.RandomState())
            tucker_reconstruction = tl.tucker_to_tensor((core, tucker_factors))
            Tucker_reconstructions[j] = tucker_reconstruction

        # Tucker_reconstructions = torch.from_numpy(Tucker_reconstructions)

        return Tucker_reconstructions
Example #16
def decomposition_elbow(dat):
    pal = sns.color_palette('bright', 10)
    palg = sns.color_palette('Greys', 10)
    mat1 = np.zeros((5, 15))
    # finding the elbow point
    for i in range(2, 15):
        for j in range(1, 5):
            facs_overall = non_negative_tucker(dat, rank=[j, i, i], random_state=2336)
            mat1[j, i] = np.mean((dat - tl.tucker_to_tensor((facs_overall[0], facs_overall[1])))**2)

    plt.figure(figsize=(10, 5))
    plt.plot(2 + np.arange(13), mat1[2][2:], c='red', label='rank = (2,x,x)')
    plt.plot(2 + np.arange(13), mat1[1][2:], c='blue', label='rank = (1,x,x)')
    plt.xlabel('x')
    plt.ylabel('error')
    plt.legend()
    plt.show()
Example #17
 def test_tucker(self): 
     n = 100
     k = 10  
     rank = 5 
     dim = 3 
     s = 20 
     tensor_shape = np.repeat(n,dim)
     noise_level = 0.01
     gen_typ = 'lk' 
     Rinfo_bucket = RandomInfoBucket(random_seed = 1)
     X, X0 = square_tensor_gen(n, rank, dim=dim, typ=gen_typ,\
          noise_level= noise_level, seed = 1)    
     core, tucker_factors = run_hosvd(X,ranks=[1 for _ in range(dim)])
     Xhat = tl.tucker_to_tensor((core, tucker_factors))
     
     self.assertTrue(np.linalg.norm((X-X0).reshape(X.size,1),'fro')/np.linalg.norm\
         (X0.reshape(X.size,1), 'fro')<0.01) 
Example #18
    def fit(self, X, y):
        X = X.astype(int)
        tensor = np.full(self.shape, np.nan)
        tensor[tuple(X.T)] = y

        if self.missing_val == 'mean':
            tensor[np.isnan(tensor)] = np.nanmean(tensor)
        else:
            tensor[np.isnan(tensor)] = self.missing_val

        self.core, self.factors = tucker(tensor,
                                         rank=self.rank,
                                         n_iter_max=self.n_iter_max,
                                         tol=self.tol,
                                         random_state=self.random_state,
                                         verbose=self.verbose)

        self.tucker_tensor = tl.tucker_to_tensor((self.core, self.factors))
        return self
Example #19
def progressive_generation(imgfile, rankend, result_path):
    # max effective rankend is min(imgarray.shape[0],imgarray.shape[1])
    target = imgfile
    imgobj = Image.open(target)
    imgarray = np.array(imgobj)
    imgtensor = tl.tensor(imgarray)

    for rank in range(1, rankend + 1):
        fullfname = os.path.join(
            result_path,
            imgfile.split('/')[-1].split('.')[0] + '_' + str(rank) + '.png')
        core_rank = [rank, rank, 1]
        core, tucker_factors = tucker(imgtensor,
                                      rank=core_rank,
                                      init='random',
                                      tol=10e-5,
                                      random_state=random_state)
        tucker_reconstruction = tl.tucker_to_tensor((core, tucker_factors))
        Image.fromarray(to_image(tucker_reconstruction)).save(fullfname)
Example #20
 def tensor_approx(self, method):
     start_time = time.time()
     if method == "hooi":
         core, tucker_factors = tucker(self.X, self.ranks, init='svd')
         X_hat = tl.tucker_to_tensor((core, tucker_factors))
         running_time = time.time() - start_time
         core_sketch = np.zeros(1)
         arm_sketches = [[] for i in np.arange(len(self.X.shape))]
         sketch_time = -1
         recover_time = running_time
     elif method == "twopass":
         sketch = Sketch(self.X,
                         self.ks,
                         random_seed=self.random_seed,
                         typ='g')
         arm_sketches, core_sketch = sketch.get_sketches()
         sketch_time = time.time() - start_time
         start_time = time.time()
         sketch_two_pass = SketchTwoPassRecover(self.X, arm_sketches,
                                                self.ranks)
         X_hat, _, _ = sketch_two_pass.recover()
         recover_time = time.time() - start_time
     elif method == "onepass":
         sketch = Sketch(self.X, self.ks, random_seed = self.random_seed, \
             ss = self.ss, store_phis = self.store_phis, typ = 'g')
         arm_sketches, core_sketch = sketch.get_sketches()
         sketch_time = time.time() - start_time
         start_time = time.time()
         sketch_one_pass = SketchOnePassRecover(arm_sketches, core_sketch, \
             TensorInfoBucket(self.X.shape, self.ks, self.ranks, self.ss),\
             RandomInfoBucket(random_seed = self.random_seed), sketch.get_phis())
         X_hat, _, _ = sketch_one_pass.recover()
         recover_time = time.time() - start_time
     else:
         raise Exception(
             "please use either of the three methods: hooi, twopass, onepass"
         )
     # Compute the relative error when the true low-rank tensor is unknown.
     # Refer to simulation.py for the case where the true low-rank tensor is given.
     rerr = eval_rerr(self.X, X_hat, self.X)
     return X_hat, core_sketch, arm_sketches, rerr, (sketch_time,
                                                     recover_time)
Example #21
    def f(self, x):
        x1 = x[:, 0]
        x2 = x[:, 1]

        ranks = [int(x1), int(x2)]

        core, [last, first] = partial_tucker(conv.weight.data,
                                             modes=[0, 1],
                                             rank=ranks,
                                             init='svd')

        recon_error = tl.norm(
            conv.weight.data - tl.tucker_to_tensor(
                (core, [last, first])), 2) / tl.norm(conv.weight.data, 2)

        #recon_error = np.nan_to_num(recon_error)

        ori_out = conv.weight.data.shape[0]
        ori_in = conv.weight.data.shape[1]
        ori_ker = conv.weight.data.shape[2]
        ori_ker2 = conv.weight.data.shape[3]

        first_out = first.shape[0]
        first_in = first.shape[1]

        core_out = core.shape[0]
        core_in = core.shape[1]

        last_out = last.shape[0]
        last_in = last.shape[1]

        original_computation = ori_out * ori_in * ori_ker * ori_ker2
        decomposed_computation = (first_out * first_in) + (
            core_in * core_out * ori_ker * ori_ker2) + (last_in * last_out)

        computation_error = decomposed_computation / original_computation

        Error = float(recon_error + computation_error)

        return Error
Example #22
def tensor_svp_solve(A,
                     mask,
                     delta=0.1,
                     epsilon=1e-2,
                     max_iterations=1000,
                     k=(10, 10, 10),
                     taker_iters=1000,
                     R=None):
    """
     A : m x n x r tensor to be completed

     mask : m x n x r tensor of zeros and ones, defining the sampling operator A(x) = mask*x
    """

    X = np.zeros_like(A)  #X = 0
    t = 0
    error = []
    error2 = []
    for t in range(max_iterations):
        Y = X - delta * mask * (X - A)  # compute Y
        # orthogonal Tucker decomposition, truncated to multilinear rank k,
        # then multiplied back into a full tensor
        X = tucker_to_tensor(
            tucker(Y,
                   rank=k,
                   n_iter_max=taker_iters,
                   init='svd',
                   svd='numpy_svd',
                   tol=1e-3))
        e = fro_norm_tensor_2(mask * (X - A))

        error.append(e)
        error2.append(fro_norm_tensor_2((1 - mask) * (X - A)))
        # check the algorithm's stopping condition
        #print(t,e)
        if (e < epsilon): break
    print(t)
    return X, np.array(error), error2
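A minimal driver sketch, assuming the snippet's `tucker`/`tucker_to_tensor` imports and the tensorly version it targets; `fro_norm_tensor_2` is not shown in the snippet, so it is assumed here to be the squared Frobenius norm:

import numpy as np

def fro_norm_tensor_2(T):
    # assumed helper: squared Frobenius norm
    return np.sum(np.asarray(T) ** 2)

A = np.random.rand(20, 20, 20)
mask = (np.random.rand(*A.shape) < 0.5).astype(float)  # observed entries
X, err, err2 = tensor_svp_solve(A, mask, k=(5, 5, 5))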
Example #23
def TD(rate, core_dimension_1, core_dimension_2, core_dimension_3, ee, l3, yy,
       select_diease_id):
    A, ward_nor_list = get_tensor_A(rate, yy)
    dim1 = len(A)
    dim2 = len(A[0])
    dim3 = len(A[0][0])

    # size of core Tensor
    dimX = core_dimension_1
    dimY = core_dimension_2
    dimZ = core_dimension_3
    S = np.random.uniform(0, 0.1, (dimX, dimY, dimZ))
    R = np.random.uniform(0, 0.1, (dim1, dimX))
    C = np.random.uniform(0, 0.1, (dim2, dimY))
    T = np.random.uniform(0, 0.1, (dim3, dimZ))
    #print(R,C,T)
    nS, nR, nC, nT = Random_gradient_descent(A, S, R, C, T, ee, l3)
    A_result = tucker_to_tensor((nS, [nR, nC, nT]))

    rmse, mae = test_loss(A_result[dim1 - 1], ward_nor_list, yy,
                          select_diease_id)  # compute the test error

    return rmse, mae
Example #24
    def recover(self):
        Qs = []
        for sketch in self.sketchs:
            Q, _ = np.linalg.qr(sketch)
            Qs.append(Q)
        phis = SketchOnePassRecover.get_phis(self.Rinfo_bucket,
                                             tensor_shape=self.tensor_shape,
                                             k=self.k,
                                             s=self.s)
        self.core_tensor = self.core_sketch
        dim = len(self.tensor_shape)
        for mode_n in range(dim):
            self.core_tensor = tl.tenalg.mode_dot(
                self.core_tensor,
                np.linalg.pinv(np.dot(phis[mode_n], Qs[mode_n])),
                mode=mode_n)

        core_tensor, factors = tucker(self.core_tensor,
                                      rank=[self.rank for _ in range(dim)])
        self.core_tensor = core_tensor
        for n in range(dim):
            self.arms.append(np.dot(Qs[n], factors[n]))
        X_hat = tl.tucker_to_tensor((self.core_tensor, self.arms))
        return X_hat, self.arms, self.core_tensor
Example #25
# Rank of the CP decomposition
cp_rank = 25
# Rank of the Tucker decomposition
tucker_rank = [100, 100, 2]

# Perform the CP decomposition
weights, factors = parafac(image, rank=cp_rank, init='random', tol=10e-6)
# Reconstruct the image from the factors
cp_reconstruction = tl.cp_to_tensor((weights, factors))

# Tucker decomposition
core, tucker_factors = tucker(image,
                              rank=tucker_rank,
                              init='random',
                              tol=10e-5,
                              random_state=random_state)
tucker_reconstruction = tl.tucker_to_tensor((core, tucker_factors))

# Plotting the original and reconstruction from the decompositions
fig = plt.figure()
ax = fig.add_subplot(1, 3, 1)
ax.set_axis_off()
ax.imshow(to_image(image))
ax.set_title('original')

ax = fig.add_subplot(1, 3, 2)
ax.set_axis_off()
ax.imshow(to_image(cp_reconstruction))
ax.set_title('CP')

ax = fig.add_subplot(1, 3, 3)
ax.set_axis_off()
ax.imshow(to_image(tucker_reconstruction))
ax.set_title('Tucker')
Example #26
 def to_tensor(self):
     return tl.tucker_to_tensor(self.decomposition)
Example #27
    def _fit_2d(self, X, Y):
        """
        Compute the HOPLS for X and Y with respect to the parameters R, Ln and Km, for the special case mode_Y = 2.

        Parameters:
            X: tensorly Tensor, The target tensor of shape [i1, ... iN], N >= 3.

            Y: tensorly Tensor, The target matrix of shape [j1, j2], M = 2.

        Returns:
            G: Tensor, The core Tensor of the HOPLS for X, of shape (R, L2, ..., LN).

            P: List, The N-1 loadings of X.

            D: Tensor, The core Tensor of the HOPLS for Y, of shape (R, K2, ..., KN).

            Q: List, The N-1 loadings of Y.

            ts: Tensor, The latent vectors of the HOPLS, of shape (i1, R).
        """

        # Initialization
        Er, Fr = X, Y
        P, T, W, Q = [], [], [], []
        D = tl.zeros((self.R, self.R))
        G = []

        # Beginning of the algorithm
        # Gr, _ = tucker(Er, ranks=[1] + self.Ln)
        for r in range(self.R):
            if torch.norm(Er) > self.epsilon and torch.norm(Fr) > self.epsilon:
                # computing the covariance
                Cr = mode_dot(Er, Fr.t(), 0)

                # HOOI tucker decomposition of C
                Gr_C, latents = tucker(Cr, rank=[1] + self.Ln)

                # Getting P and Q loadings
                qr = latents[0]
                qr /= torch.norm(qr)
                # Pr = latents[1:]
                Pr = [a / torch.norm(a) for a in latents[1:]]
                P.append(Pr)
                tr = multi_mode_dot(Er, Pr, list(range(1, len(Pr) + 1)), transpose=True)
                # Gr_pi = torch.pinverse(matricize(Gr))
                # tr = torch.mm(matricize(tr), Gr_pi)
                GrC_pi = torch.pinverse(matricize(Gr_C))
                tr = torch.mm(matricize(tr), GrC_pi)
                tr /= torch.norm(tr)

                # recomposition of the core tensor of Y
                ur = torch.mm(Fr, qr)
                dr = torch.mm(ur.t(), tr)

                D[r, r] = dr
                Pkron = kronecker([Pr[self.N - n - 1] for n in range(self.N)])
                # P.append(torch.mm(matricize(Gr), Pkron.t()).t())
                # W.append(torch.mm(Pkron, Gr_pi))
                Q.append(qr)
                T.append(tr)
                Gd = tl.tucker_to_tensor([Er, [tr] + Pr], transpose_factors=True)
                Gd_pi = torch.pinverse(matricize(Gd))
                W.append(torch.mm(Pkron, Gd_pi))

                # Deflation
                # X_hat = torch.mm(torch.cat(T, dim=1), torch.cat(P, dim=1).t())
                # Er = X - np.reshape(X_hat, (Er.shape), order="F")
                Er = Er - tl.tucker_to_tensor([Gd, [tr] + Pr])
                Fr = Fr - dr * torch.mm(tr, qr.t())
            else:
                break

        Q = torch.cat(Q, dim=1)
        T = torch.cat(T, dim=1)
        # P = torch.cat(P, dim=1)
        W = torch.cat(W, dim=1)

        self.model = (P, Q, D, T, W)
        return self
Example #28
    def fit(self, X, Y):
        """
        Compute the HOPLS for X and Y with respect to the parameters R, Ln and Km.

        Parameters:
            X: tensorly Tensor, The target tensor of shape [i1, ... iN], N >= 3.

            Y: tensorly Tensor, The target tensor of shape [j1, ... jM], M >= 3.

        Returns:
            G: Tensor, The core Tensor of the HOPLS for X, of shape (R, L2, ..., LN).

            P: List, The N-1 loadings of X.

            D: Tensor, The core Tensor of the HOPLS for Y, of shape (R, K2, ..., KN).

            Q: List, The N-1 loadings of Y.

            ts: Tensor, The latent vectors of the HOPLS, of shape (i1, R).
        """
        # check parameters
        X_mode = len(X.shape)
        Y_mode = len(Y.shape)
        assert Y_mode >= 2, "Y needs to be at least 2-mode."
        assert X_mode >= 3, "X needs to be at least 3-mode."
        assert (
            len(self.Ln) == X_mode - 1
        ), f"The list of ranks for the decomposition of X (Ln) must have length equal to the order of X minus 1: {X_mode-1}."
        if Y_mode == 2:
            return self._fit_2d(X, Y)
        assert (
            len(self.Km) == Y_mode - 1
        ), f"The list of ranks for the decomposition of Y (Km) must have length equal to the order of Y minus 1: {Y_mode-1}."

        # Initialization
        Er, Fr = X, Y
        In = X.shape
        T, G, P, Q, D, W = [], [], [], [], [], []

        # Beginning of the algorithm
        for r in range(self.R):
            if torch.norm(Er) > self.epsilon and torch.norm(Fr) > self.epsilon:
                Cr = torch.Tensor(np.tensordot(Er, Fr, (0, 0)))
                # HOOI tucker decomposition of C
                _, latents = tucker(Cr, rank=self.Ln + self.Km)

                # Getting P and Q loadings
                Pr = latents[: len(Er.shape) - 1]
                Qr = latents[len(Er.shape) - 1 :]

                # computing product of Er by latents of X
                tr = multi_mode_dot(Er, Pr, list(range(1, len(Pr) + 1)), transpose=True)  # modes 1..N-1, matching _fit_2d

                # Getting t as the first leading left singular vector of the product
                tr = torch.svd(matricize(tr))[0][:, 0]
                tr = tr[..., np.newaxis]

                # recomposition of the core tensors
                Gr = tl.tucker_to_tensor((Er, [tr] + Pr), transpose_factors=True)
                Dr = tl.tucker_to_tensor((Fr, [tr] + Qr), transpose_factors=True)
                Pkron = kronecker([Pr[self.N - n - 1] for n in range(self.N)])
                Gr_pi = torch.pinverse(matricize(Gr))
                W.append(torch.mm(Pkron, Gr_pi))

                # Gathering of
                P.append(Pr)
                Q.append(Qr)
                G.append(Gr)
                D.append(Dr)
                T.append(tr)

                # Deflation
                Er = Er - tl.tucker_to_tensor((Gr, [tr] + Pr))
                Fr = Fr - tl.tucker_to_tensor((Dr, [tr] + Qr))
            else:
                break

        T = torch.cat(T, dim=1)
        W = torch.cat(W, dim=1)
        self.model = (P, Q, D, T, W)
        return self
Example #29
"""
# In[] Import Neccessary Packages
import tensorly as tl
from tensorly.decomposition import non_negative_tucker
import matplotlib.pyplot as plt
import numpy as np

user_tensor = user100_tensor

# In[]
# Experiment 1: Non-negative Tucker Decomposition
core, factors = non_negative_tucker(user_tensor,
                                    rank=(20, 10, 10, 3),
                                    n_iter_max=3000)

reconstruction = tl.tucker_to_tensor((core, factors))
error = tl.norm(reconstruction - user_tensor) / tl.norm(user_tensor)
print(error)

user0 = user_tensor[0, :, :, :]
user0_recon = reconstruction[0, :, :, :]
user0_recon = np.round(user0_recon, 4)

# In[]
# Experiment 2: Robust PCA
#### 2.1 Generate boolean mask

mask = user_tensor > 0
mask = mask.astype(int)  # np.int was removed from NumPy

#### 2.2 Decomposition
Example #30
# tensor generation
array = np.random.randint(1000, size=(10, 30, 40))
tensor = tl.tensor(array, dtype='float')

##############################################################################
# Non-negative Tucker
# -----------------------
# First, multiplicative update can be implemented as:

tic = time.time()
tensor_mu, error_mu = non_negative_tucker(tensor,
                                          rank=[5, 5, 5],
                                          tol=1e-12,
                                          n_iter_max=100,
                                          return_errors=True)
tucker_reconstruction_mu = tl.tucker_to_tensor(tensor_mu)
time_mu = time.time() - tic

##############################################################################
# Here, we also compute the output tensor from the decomposed factors by using
# the ``tucker_to_tensor`` function. The tensor ``tucker_reconstruction_mu`` is
# therefore a low-rank non-negative approximation of the input tensor ``tensor``.

##############################################################################
# Non-negative Tucker with HALS and FISTA
# ---------------------------------------
# HALS algorithm with FISTA can be calculated as:

ticnew = time.time()
tensor_hals_fista, error_fista = non_negative_tucker_hals(tensor,
                                                          rank=[5, 5, 5],