def ir_tensor_l1pca(X, K, A, n_max, num_init, print_flag):

	dataset_matrix_size = list(X.shape)
	
	# Initialize the unfolding, subspace, and conformity lists.
	unfolding_ii = [[] for xx in range(len(dataset_matrix_size))]	# Unfolding list.
	Q_ii = [[] for xx in range(len(dataset_matrix_size))]			# Subspace list.
	conf_ii = [[] for xx in range(len(dataset_matrix_size))]		# Conformity list.

	# Calculate the initial subspaces.
	for ii in range(len(dataset_matrix_size)):
		unfolding_ii[ii] = unfold(X, ii)		# Calculate the unfoldings.

		Q_ii[ii], B, vmax = l1pca_sbfk(unfolding_ii[ii], K[ii], num_init, print_flag)	# Calculate the subspaces.
	
	# Iterate.
	for iter_ in range(0, n_max):

		for ii in range(0, len(A)):
			# Calculate the norm of the projection of each column of the unfolding.
			vect_weight = np.linalg.norm(np.matmul(np.matmul(Q_ii[ii],Q_ii[ii].transpose()),unfolding_ii[ii]), axis = 0)
			# Convert to tensor form and multiply with the corresponding weight
			conf_ii[ii] = np.array(fold(np.tile(vect_weight, (dataset_matrix_size[ii], 1)), ii, X.shape), dtype=float)*A[ii]
		
		# Combine the conformity values to form the final conformity tensor.	
		conf_final = zero_one_normalization(sum(conf_ii))

		# Calculate the updated L1-PCs.
		for ii in range(0, len(A)):
			Q_ii[ii], B, vmax = l1pca_sbfk(unfold(np.multiply(X, conf_final), ii), K[ii], num_init, print_flag)

	return Q_ii, conf_final
Example #2
def decompose_three_way(tensor, rank, max_iter=501, verbose=False):

    # a = np.random.random((rank, tensor.shape[0]))
    b = np.random.random((rank, tensor.shape[1]))
    c = np.random.random((rank, tensor.shape[2]))

    for epoch in range(max_iter):
        # optimize a
        input_a = khatri_rao([b.T, c.T])
        target_a = tl.unfold(tensor, mode=0).T
        a = np.linalg.solve(input_a.T.dot(input_a), input_a.T.dot(target_a))

        # optimize b
        input_b = khatri_rao([a.T, c.T])
        target_b = tl.unfold(tensor, mode=1).T
        b = np.linalg.solve(input_b.T.dot(input_b), input_b.T.dot(target_b))

        # optimize c
        input_c = khatri_rao([a.T, b.T])
        target_c = tl.unfold(tensor, mode=2).T
        c = np.linalg.solve(input_c.T.dot(input_c), input_c.T.dot(target_c))

        if verbose and epoch % int(max_iter * .2) == 0:
            res_a = np.square(input_a.dot(a) - target_a)
            res_b = np.square(input_b.dot(b) - target_b)
            res_c = np.square(input_c.dot(c) - target_c)
            print("Epoch:", epoch, "| Loss (C):", res_a.mean(), "| Loss (B):", res_b.mean(), "| Loss (C):", res_c.mean())

    return a.T, b.T, c.T
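A minimal usage sketch, assuming `tl` is tensorly, `khatri_rao` is `tensorly.tenalg.khatri_rao`, and the decompose_three_way above is in scope:

import numpy as np
import tensorly as tl
from tensorly.tenalg import khatri_rao

T = tl.tensor(np.random.random((10, 12, 14)))
a, b, c = decompose_three_way(T, rank=3, max_iter=100)
# Reconstruct the rank-3 CP model and report its relative error.
approx = tl.kruskal_to_tensor((np.ones(3), [a, b, c]))
print(np.linalg.norm(approx - T) / np.linalg.norm(T))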
Example #3
def calculate_tucker_energy(tensor, A):
    energy_stack = []
    for i in range(tensor.ndim):
        sample_count_i = np.prod(tensor.shape) / tensor.shape[i]
        sample_coef_i = 1 / (sample_count_i - 1)
        unfold_i = tl.unfold(tensor, i)
        tensor_proj_i = tl.tenalg.mode_dot(tensor, A[i].T, i)
        tensor_proj_unfold_i = tl.unfold(tensor_proj_i, i)

        full_cov_i = sample_coef_i * unfold_i @ unfold_i.T
        tensor_proj_cov_i = sample_coef_i * tensor_proj_unfold_i @ tensor_proj_unfold_i.T

        total_energy_i = np.trace(full_cov_i)
        expl_energy_i_per_component = -np.sort(-tensor_proj_cov_i.diagonal())
        expl_energy_ratio_i_per_component = expl_energy_i_per_component / total_energy_i
        total_energy_i_per_component = [
            total_energy_i for _ in range(len(expl_energy_i_per_component))
        ]

        energy_stack.append([
            total_energy_i_per_component, expl_energy_i_per_component,
            expl_energy_ratio_i_per_component
        ])
    energy_stack = np.array(energy_stack)

    return energy_stack
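A hedged usage sketch: the factor matrices A are assumed to come from an HOSVD-style step, i.e. the leading left singular vectors of each unfolding:

import numpy as np
import tensorly as tl

X = np.random.rand(6, 7, 8)
A = [np.linalg.svd(tl.unfold(X, i))[0][:, :3] for i in range(3)]
stack = calculate_tucker_energy(X, A)
print(stack.shape)  # one (total, explained, ratio) triple per mode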
Example #4
def update_Um(m, p, X, G, Us):
    """
    Update and return the U matrix of mode m
    
    Input:
      - m: The mode along which the update will take place
      - p: AR model order
      - X: Data in their original form (before decomposition)
      - G: Core tensors
      - Us: List containing the U matrices
    
    Returns:
      - new_Um: New U matrix along mode-m
    """
    Bs = []
	H = tl.tenalg.kronecker([u.T for u, i in zip(Us[::-1], reversed(range(len(Us)))) if i != m])
    for t in range(p, X.shape[-1]):
        unfold_X = tl.unfold(X[..., t], m)
        dot1 = np.dot(unfold_X, H.T)
        Bs.append(np.dot(dot1, tl.unfold(G[..., t], m).T ))
    b = np.sum(Bs, axis=0)
	U1, _, V1 = np.linalg.svd(b, full_matrices=False)
	new_Um = np.dot(U1, V1)  # orthogonal Procrustes solution
	Us[m] = new_Um
	return new_Um
    return new_Um
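For reference, a minimal sketch of the orthogonal-Procrustes step used above: for any matrix b, the product U1 @ V1 from its thin SVD has orthonormal columns and maximizes trace(U.T @ b) over all such matrices:

import numpy as np

b = np.random.rand(6, 3)
U1, _, V1 = np.linalg.svd(b, full_matrices=False)
U = U1 @ V1
print(np.allclose(U.T @ U, np.eye(3)))  # True: columns are orthonormal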
def CPD(X, R, iterations):
    np.random.seed(0)
    X = tl.tensor(X)
    X1 = tl.unfold(X, 0)
    X2 = tl.unfold(X, 1)
    X3 = tl.unfold(X, 2)
    XV = [X1, X2, X3]

    A1 = np.random.rand(X.shape[0], R)
    A2 = np.random.rand(X.shape[1], R)
    A3 = np.random.rand(X.shape[2], R)
    A = [A1, A2, A3]
    lam = [[], [], []]

    for i in range(iterations):
        for k in range(3):
            AO = A[:k] + A[k + 1:]
            V = np.multiply(np.matmul(AO[0].T, AO[0]),
                            np.matmul(AO[1].T, AO[1]))
            KR = tl.tenalg.khatri_rao(AO)
            A[k] = np.matmul(np.matmul(XV[k], KR),
                             np.matmul(np.linalg.inv(np.matmul(V.T, V)), V.T))
            lam[k] = np.linalg.norm(A[k], axis=0)
            A[k] = A[k] / lam[k]

    return A, lam[0]
Example #6
def approx_tensor_rank(A,
                       threshold=0.03,
                       ranks_for_imputation=(20, 4, 2, 8, 20),
                       verbose=False):
    """Compute approximate (matrix) rank of a tensor. Right now this function only supports the calculation of approximte rank of dataset and estimator dimensions. 

    Args:
        A (np.ndarray):    Tensor for which to compute rank.
        threshold (float): All singular values less than threshold * (largest singular value) will be set to 0

    Returns:
        tuple of int: The approximate rank of A in each dimension.
    """

    dim = len(A.shape)

    if np.sum(np.isnan(A)):
        _, _, A, _ = tucker_on_error_tensor(A,
                                            ranks=ranks_for_imputation,
                                            verbose=verbose)

    s0 = sp.linalg.svd(tl.unfold(A, mode=0), compute_uv=False)
    rank0 = len(s0[s0 >= threshold * s0[0]])

    s_last = sp.linalg.svd(tl.unfold(A, mode=dim - 1), compute_uv=False)
    rank_last = len(s_last[s_last >= threshold * s_last[0]])

    return (rank0, 4, 2, 8, rank_last)
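The same threshold rule on a plain matrix, as a sanity check:

import numpy as np

M = np.random.rand(30, 20) @ np.random.rand(20, 5)  # rank 5 by construction
s = np.linalg.svd(M, compute_uv=False)
print(len(s[s >= 0.03 * s[0]]))  # typically 5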
Example #7
    def TFAI_CP_within_mod(self,
                           X,
                           S_m,
                           S_d,
                           r=3,
                           alpha=0.25,
                           beta=1.0,
                           lam=0.1,
                           tol=1e-7,
                           max_iter=500,
                           seed=0):
        m = X.shape[0]
        d = X.shape[1]
        t = X.shape[2]

        # initialization
        np.random.seed(seed)
        C = np.mat(np.random.rand(m, r))
        P = np.mat(np.random.rand(d, r))
        D = np.mat(np.random.rand(t, r))

        X_1 = np.mat(tl.unfold(X, 0))
        X_2 = np.mat(tl.unfold(X, 1))
        X_3 = np.mat(tl.unfold(X, 2))
        D_C = np.diagflat(S_m.sum(1))
        D_P = np.diagflat(S_d.sum(1))
        L_C = D_C - S_m
        L_P = D_P - S_d

        for i in range(max_iter):
            G = np.mat(tl.tenalg.khatri_rao([P, D]))
            output_X_old = tl.fold(np.array(C * G.T), 0, X.shape)

            C = np.mat(
                sp.linalg.solve_sylvester(
                    np.array(alpha * L_C + lam * np.mat(np.eye(m))),
                    np.array(G.T * G), X_1 * G))
            U = np.mat(tl.tenalg.khatri_rao([C, D]))
            P = np.mat(
                sp.linalg.solve_sylvester(
                    np.array(beta * L_P + lam * np.mat(np.eye(d))),
                    np.array(U.T * U), X_2 * U))
            B = np.mat(tl.tenalg.khatri_rao([C, P]))
            D = X_3 * B * np.linalg.inv(B.T * B + lam * np.eye(r))

            output_X = tl.fold(np.array(D * B.T), 2, X.shape)
            err = np.linalg.norm(output_X -
                                 output_X_old) / np.linalg.norm(output_X_old)
            if err < tol:
                print(i)
                break
        predict_X = tl.fold(
            np.array(C * np.mat(tl.tenalg.khatri_rao([P, D])).T), 0, X.shape)
        return predict_X
Example #8
def perform_CMTF(tOrig=None, mOrig=None, r=10):
    """ Perform CMTF decomposition. """
    if tOrig is None:
        tOrig, mOrig = createCube()

    tFac = CPTensor(initialize_cp(np.nan_to_num(tOrig, nan=np.nanmean(tOrig)), r, non_negative=True))
    mFac = CPTensor(initialize_cp(np.nan_to_num(mOrig, nan=np.nanmean(mOrig)), r, non_negative=True))

    # Pre-unfold
    selPat = np.all(np.isfinite(mOrig), axis=1)
    unfolded = tl.unfold(tOrig, 0)
    missing = np.any(np.isnan(unfolded), axis=0)
    unfolded = unfolded[:, ~missing]

    R2X = -1.0
    mFac.factors[0] = tFac.factors[0]
    mFac.factors[1] = np.linalg.lstsq(mFac.factors[0][selPat, :], mOrig[selPat, :], rcond=None)[0].T

    for ii in range(8000):
        # Solve for the subject matrix
        kr = khatri_rao(tFac.factors[1], tFac.factors[2])[~missing, :]
        kr2 = np.vstack((kr, mFac.factors[1]))
        unfolded2 = np.hstack((unfolded, mOrig))

        tFac.factors[0] = censored_lstsq(kr2, unfolded2.T)
        mFac.factors[0] = tFac.factors[0]

        # PARAFAC on other antigen modes
        for m in [1, 2]:
            kr = khatri_rao(tFac.factors[0], tFac.factors[3 - m])
            unfold = tl.unfold(tOrig, m)
            tFac.factors[m] = censored_lstsq(kr, unfold.T)

        # Solve for the glycan matrix fit
        mFac.factors[1] = np.linalg.lstsq(mFac.factors[0][selPat, :], mOrig[selPat, :], rcond=None)[0].T

        if ii % 20 == 0:
            R2X_last = R2X
            R2X = calcR2X(tOrig, mOrig, tFac, mFac)

        if R2X - R2X_last < 1e-6:
            break

    tFac.normalize()
    mFac.normalize()

    # Reorient the later tensor factors
    tFac.factors, mFac.factors = reorient_factors(tFac.factors, mFac.factors)

    return tFac, mFac, R2X
def unfold(S,mode):
    x = S.shape[0]
    y = S.shape[1]
    z = S.shape[2]

    if mode == 0:
        M=np.zeros((x,y*z))
        for i in range(z):
            M[:,y*i:y*(i+1)] = S[:,:,i]
    elif mode == 1:
        M = tl.unfold(np.swapaxes(S,0,2),1)
    elif mode == 2:
        M = tl.unfold(np.swapaxes(S,0,1),2)
    else:
        raise ValueError("mode must be 0, 1, or 2")
    return M
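Note that this slice-stacking unfold follows the Kolda/Bader column ordering, which differs from tensorly's row-major tl.unfold; a quick illustration, assuming the unfold above is in scope:

import numpy as np
import tensorly as tl

S = np.arange(24).reshape(2, 3, 4)
print(unfold(S, 0))     # mode-1 index varies fastest within each column block
print(tl.unfold(S, 0))  # tensorly's C-ordered unfolding of the same mode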
Example #10
def svd_init_fac(tensor, rank):
    """
    svd initialization of factor matrices for a given tensor and rank
    
    Parameters
    ----------
    tensor : tensor
    rank : int

    Returns
    -------
    factors : list of matrices

  """
    factors = []
    for mode in range(tl.ndim(tensor)):
        # unfolding of a given mode
        unfolded = tl.unfold(tensor, mode)
        if rank <= tl.shape(tensor)[mode]:
            u, s, v = tl.partial_svd(
                unfolded,
                n_eigenvecs=rank)  # first `rank` singular vectors/values, largest first
        else:
            u, s, v = tl.partial_svd(unfolded,
                                     n_eigenvecs=tl.shape(tensor)[mode])
            # completed by random columns
            u = np.append(u,
                          np.random.random(
                              (np.shape(u)[0], rank - tl.shape(tensor)[mode])),
                          axis=1)
            # sometimes we have singular matrix error for als
        factors += [u]
    return factors
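A short usage sketch, assuming a tensorly version that still exposes tl.partial_svd:

import numpy as np
import tensorly as tl

factors = svd_init_fac(tl.tensor(np.random.rand(5, 6, 7)), rank=4)
print([f.shape for f in factors])  # [(5, 4), (6, 4), (7, 4)]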
Example #11
def gpu_sthosvd(
    tensor: torch.Tensor, core_size: List[int]
) -> Tuple[torch.Tensor, List[torch.Tensor], List[torch.Tensor]]:
    """
    GPU Seqeuntially Truncated Higher Order SVD.
    
    Parameters
    ----------
    tensor : torch.Tensor,
        arbitrarily dimensional tensor
    core_size : list of int
    
    Returns
    -------
    torch.Tensor
        core tensor
    List[torch.Tensor]
        list of singular vectors
    List[torch.Tensor]
        list of singular vectors
    """
    intermediate = tensor
    singular_vectors, singular_values = [], []
    for mode in range(len(tensor.shape)):
        svec, sval, _ = gpu_tsvd(tl.unfold(intermediate, mode), core_size[mode])
        intermediate = tl.tenalg.mode_dot(intermediate, svec.t(), mode)
        singular_vectors.append(svec)
        singular_values.append(sval)
    return intermediate, singular_vectors, singular_values
Example #12
def MLSVD(A, truncated=False, compute_core_tensor=True):
    ''' Multi-Linear Singular Value Decomposition

    Parameters
    ----------
    A : Tensor of order n,
        Tensor to decompose
    truncated : False or int,
        Number of singular vectors to keep per mode, or False for no
        truncation. Defaults to False.

    Returns
    -------
    B : Tensor,
        Core tensor (returned only if compute_core_tensor is True)
    [U1,U2,...,Un] : list of matrices,
        List of mode-n singular vectors of A

    '''
    U = []
    if truncated is False:
        truncated = None  # a slice end of None keeps every singular vector

    ## Compute orthogonal matrices
    for o in range(len(A.shape)):
        unfolded = tl.unfold(A, o)
        U.append(la.svd(unfolded)[0][:, :truncated])

    if compute_core_tensor:
        ## Compute core tensor
        B = tl.tenalg.multi_mode_dot(A,
                                     U,
                                     modes=range(len(A.shape)),
                                     transpose=True)

        return B, U
    else:
        return U
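A hedged usage sketch, assuming la is numpy.linalg and tl is tensorly; with untruncated factors the reconstruction is exact:

import numpy as np
import numpy.linalg as la
import tensorly as tl

A = np.random.rand(4, 5, 6)
B, U = MLSVD(A)
A_rec = tl.tenalg.multi_mode_dot(B, U, modes=range(3))
print(np.allclose(A_rec, A))  # True: each U[n] is square and orthogonal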
def get_Binary_FactorU(Tx, Ty, U_list, mode_j, I_j, lambda_regula):
    """
    Estimate current factor matrix U with L2 penalty in Binary detection model;
    It is for one-time estimation only
    Tx: Input Tensor
    Ty: Response Tensor
    U_list: list of current factor matrices
    mode_j: j-th mode
    I_j: j-th mode dimension
    lambda_regula: regularization parameter for the L2 penalty

    Notice that in Binary case, both Tx and Ty are single tensor
    """
    Phi = Phi_mode(Ty, mode_j)
    temptensor = Tx[0]
    C_prod = tl.unfold(multi_mode_dot(temptensor, U_list), mode_j)
    res = np.multiply(Phi.T, C_prod)
    a = np.shape(res)
    for i in range(a[0]):
        for j in range(a[1]):
            if res[i, j] >= 1:
                # We want to change elements in Phi.T, so reverse the index.
                Phi[j, i] = 0

    # Forcing phi_{ij} to be zero removes the gradient coming from the non_sv loss.
    G = F_MM(Tx, mode_j, U_list)
    Iden = np.diag(np.ones(I_j))
    leftMM = np.dot(G, G.T) + 0.5 * lambda_regula * Iden
    LMM = np.linalg.pinv(leftMM)
    RMM = np.dot(G, Phi)
    U_new = np.dot(LMM, RMM)
    return U_new.T
Example #14
 def encode(self, data):
     if len(data.shape) == 2:
         data = data[np.newaxis, ...]
     input_a = tl.tenalg.khatri_rao([self.factor, self.factor])
     target_a = tl.unfold(data, mode=0).T
     return np.squeeze(
         np.linalg.solve(input_a.T.dot(input_a), input_a.T.dot(target_a)).T)
Example #15
def set_tensor_matricization(x, mode=0):
    matricization = []

    for sample in x:
        matricization.append(tl.unfold(sample, mode))

    return np.asarray(matricization)
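For example, matricizing every sample of a batch along mode 0:

import numpy as np
import tensorly as tl

batch = np.random.rand(16, 4, 5, 6)  # 16 samples of shape (4, 5, 6)
mats = set_tensor_matricization(batch, mode=0)
print(mats.shape)  # (16, 4, 30)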
Example #16
 def run_aca_full():
     N = 200
     C_list = []
     ranks = np.array([N, N, N])
     tensor = np.asarray(utilis.get_B_one(N))
     for mode in range(3):
         if mode == 0:
             # Start with original matrix
             Core_mat = tl.unfold(tensor, mode)
         else:
             Core_mat = tl.unfold(Core_ten, mode)
         C, U, R = aca_fun.aca_full_pivoting(Core_mat, 10e-5)
         ranks[mode] = U.shape[0]
         print(f'Current ranks: {ranks}')
         Core_ten = tl.fold(np.dot(U, R), mode, ranks)
         C_list.append(C)
Example #17
def test_aca_func():
    # check whether the functional matricization is identical to the original one
    N = 50
    for obj in ["B1", "B2"]:
        C_list = []
        ranks = np.array([N, N, N])
        for mode in range(3):
            if mode == 0:
                if obj == "B1":
                    functional_generator = aca_fun.mode_m_matricization_fun(
                        aca_fun.b1, N, N, N)
                    C, U, R = aca_fun.aca_partial_pivoting(
                        functional_generator, N, N * N, N, 10e-4 / 3)
                    tensor = np.asarray(utilis.get_B_one(N))
                else:
                    functional_generator = aca_fun.mode_m_matricization_fun(
                        aca_fun.b2, N, N, N)
                    C, U, R = aca_fun.aca_partial_pivoting(
                        functional_generator, N, N * N, N, 10e-4 / 3)
                    tensor = np.asarray(utilis.get_B_two(N))
            else:
                Core_mat = tl.unfold(Core_ten, mode)
                C, U, R = aca_fun.aca_full_pivoting(Core_mat, 10e-4 / 3)
            ranks[mode] = U.shape[0]
            Core_ten = tl.fold(np.dot(U, R), mode, ranks)
            C_list.append(C)

        recon = utilis.reconstruct_tensor(C_list, Core_ten, tensor)
        error = utilis.frobenius_norm_tensor(recon - tensor)
        assert (error < 10e-4 * utilis.frobenius_norm_tensor(tensor))
Example #18
def tt_als(T, cores, omega):
    if len(cores) <= 1:
        return cores
    new_cores = []
    for core in cores:
        new_cores.append(core.get_tensor())
    new_cores[0] = new_cores[0].unsqueeze(0)
    new_cores[-1] = new_cores[-1].unsqueeze(-1)
    for s in range(len(cores)):
        B = tensor_connect(new_cores, s)
        B_mat = tl.unfold(B, 1)
        T_mat = tl.unfold(T.get_tensor(), s)
        omega_mat = tl.unfold(omega, s)
        ranks = (B.shape[-1], B.shape[0])
        new_cores[s] = tt_als_step(B_mat, new_cores[s], T_mat, omega_mat, 0.01,
                                   ranks)
    return put_mps(new_cores)[0]
Example #19
def reshape_expectation(z, ranks, mode):
    Lm = ranks[mode]
    Ln = int(np.prod(ranks) / Lm)
    Z = np.zeros((len(z), Lm, Ln))
    # mode-m matricize E[z(t)]
    for t, zt in enumerate(z):
        Z[t] = unfold(zt.reshape(ranks), mode)
    return Z
Example #20
def run_hosvd(X, ranks):
    arms = []
    core = X
    for mode in range(X.ndim):
        U, _, _ = randomized_svd(tl.unfold(X, mode), ranks[mode])
        arms.append(U)
        core = tl.tenalg.mode_dot(core, U.T, mode)
    return core, arms
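A usage sketch, assuming randomized_svd is sklearn.utils.extmath.randomized_svd:

import numpy as np
import tensorly as tl
from sklearn.utils.extmath import randomized_svd

X = np.random.rand(8, 9, 10)
core, arms = run_hosvd(X, ranks=[4, 4, 4])
X_hat = tl.tenalg.multi_mode_dot(core, arms)  # rank-(4, 4, 4) approximation
print(np.linalg.norm(X_hat - X) / np.linalg.norm(X))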
Example #21
def falrtc(c, T, K, Omega):
    X = T
    a = abs(np.random.standard_normal(3))
    a /= sum(a)
    m = a / 100000
    L = np.sum(m)
    Z = T
    W = T
    B = 0
    for k in range(K):
        while True:
            theta = (1 + mt.sqrt(1 + 4 * L * B)) / (2 * L)
            W = (theta / L) / (B + theta / L) * Z + B / (B + theta / L) * X

            # compute f_mu(X), f_mu(W), and gradient of f_mu(W)
            fx = 0
            fw = 0
            fxp = 0
            gw = np.zeros(X.shape)
            for i in range(3):
                [trunkX, sigX] = truncate_op(tl.unfold(X, i), m[i] / a[i])
                [trunkW, sigW] = truncate_op(tl.unfold(W, i), m[i] / a[i])
                fx += np.sum(sigX)
                fw += np.sum(sigW)
                gw += tl.fold((a[i] * a[i] / m[i]) * trunkW, i, W.shape)

            # replace the known pixels with zeros in gw
            for nr in range(X.shape[0]):
                for nc in range(X.shape[1]):
                    if all(Omega[nr, nc, :] != 0):
                        gw[nr, nc, :] = 0

            if fx <= fw - (LA.norm(gw)**2) / (2 * L):
                break
            Xp = W - gw / L
            for r in range(3):
                [_, sig_fxp] = truncate_op(tl.unfold(Xp, r), m[r] / a[r])
                fxp += np.sum(sig_fxp)
            if fxp <= fw - (LA.norm(gw)**2) / (2 * L):
                X = Xp
                break
            L = L / c
        Z = Z - theta * gw
        B = B + theta
    return X
def Phi_mode(Y, mode_j):
    """
    Phi vector in the algorithm
    Given a list of response tensors y_i, return the mode-j unfoldings of each y_i stacked side by side, then transposed
    We should form it as a matrix, not a tensor, so take an np array
    """
    temp = [tl.unfold(i, mode_j) for i in Y]
    ans = np.hstack(tuple(temp))
    return ans.T
Example #23
 def trunc_hosvd(self, tensor):
     A = []
     ranks = np.repeat(self.R, tensor.ndim) if isinstance(self.R, int) else self.R
     for i in range(tensor.ndim):
         unfold_i = tl.unfold(tensor, i)
         u, _, _ = svds(unfold_i, k=ranks[i], tol=self.tol)
         A.append(u)
     A = np.array(A)
     G = tl.tenalg.multi_mode_dot(tensor, A, transpose=True)
     return G, A
def run_exp(tensor,
            k,
            mode=1,
            method='normal',
            iteration=100,
            var_reduction='geomedian'):
    '''
    :tensor: generated tensor
    :k: reduced dimension
    :mode: mode along which the tensor is unfolded
    :method: 'TRP' or 'normal'
    :iteration: total number of iterations
    :var_reduction: 'average', 'geomedian', or None
    '''
    assert method in ['TRP', 'normal']
    assert var_reduction in ['average', 'geomedian', None]
    shape = list(tensor.shape)
    del shape[mode]

    if method == 'TRP':
        rp = Merp(n=shape, k=k, tensor=True)
    else:
        rp = Merp(n=shape, k=k, tensor=False)

    # unfold the tensor along the requested mode
    X = tl.unfold(tensor, mode=mode)
    relative_errs = []
    for _ in range(iteration):
        if var_reduction is None:
            rp.regenerate_omega()
            reduced_X = rp.transform(X)

            # qr decomposition
            q, _ = np.linalg.qr(reduced_X, mode='reduced')

            # calculate relative error
            err = np.linalg.norm(np.matmul(np.matmul(q, q.T), X) - X,
                                 ord='fro')
            del q
        else:
            X_hats = []
            for _ in range(5):
                rp.regenerate_omega()
                reduced_X = rp.transform(X)
                # qr decomposition
                q, _ = np.linalg.qr(reduced_X, mode='reduced')
                X_hats.append(np.matmul(np.matmul(q, q.T), X))

                del q

            avg_X_hat = average_mat(X_hats, var_reduction)
            del X_hats
            # calculate relative error
            err = np.linalg.norm(avg_X_hat - X, ord='fro')

        relative_err = err / np.linalg.norm(X, ord='fro')
        relative_errs.append(relative_err)
    return sorted(relative_errs)
Example #25
def tmacTT(T, K, Omega):
    X = T
    U = []
    V = []
    un = []
    for n in range(3):
        un.append(tl.unfold(X, n))
        rank = LA.matrix_rank(un[n])
        U.append(np.ones([un[n].shape[0], rank]))
        V.append(np.ones([rank, un[n].shape[1]]))
    for k in range(K):
        for i in range(3):
            un[i] = tl.unfold(X, i)
            U[i] = np.matmul(un[i], V[i].T)
            U_t = U[i].T
            V[i] = np.matmul(np.matmul(LA.pinv(np.matmul(U_t, U[i])), U_t),
                             un[i])
            un[i] = np.matmul(U[i], V[i])
        X = update(un, T, Omega)
    return X
Example #26
def CPDMWUTime(X,
               F,
               sketching_rates,
               lamb,
               eps,
               nu,
               Hinit,
               max_time,
               sample_interval=0.5):
    weights = np.array([1] * len(sketching_rates)) / (len(sketching_rates))

    dim_1, dim_2, dim_3 = X.shape
    A, B, C = Hinit[0], Hinit[1], Hinit[2]

    X_unfold = [tl.unfold(X, m) for m in range(3)]

    norm_x = norm(X)
    I = np.eye(F)

    PP = tl.kruskal_to_tensor((np.ones(F), [A, B, C]))
    error = np.linalg.norm(X - PP)**2 / norm_x

    NRE_A = {0: error}

    start = time.time()

    sketching_rates_selected = {}
    now = time.time()
    itr = 1
    with tqdm(position=0) as pbar:
        while now - start < max_time:
            s = sketching_weight(sketching_rates, weights)

            # Solve Ridge Regression for A,B,C
            A, B, C = update_factors(A, B, C, X_unfold, I, lamb, s, F)

            # Update weights
            p = np.random.binomial(n=1, p=eps)
            if p == 1 and len(sketching_rates) > 1:
                update_weights(A, B, C, X_unfold, I, norm_x, lamb, weights,
                               sketching_rates, F, nu, eps)
            now = time.time()
            PP = tl.kruskal_to_tensor((np.ones(F), [A, B, C]))
            error = np.linalg.norm(X - PP)**2 / norm_x
            elapsed = now - start
            NRE_A[elapsed] = error
            sketching_rates_selected[elapsed] = s
            pbar.set_description(
                "iteration: {}  t: {:.5f}  s: {}   error: {:.5f}  rates: {}".
                format(itr, elapsed, s, error, sketching_rates))
            itr += 1
            pbar.update(1)

    return A, B, C, NRE_A, sketching_rates_selected
def T2():

    X, Y, Y2 = [], [], []
    for rg in range(1, size):
        F_res, error = tensor_svp_solve(F,
                                        mask,
                                        delta=1.5,
                                        max_iterations=iters,
                                        k=(rg, rg, rg),
                                        taker_iters=10000,
                                        epsilon=0.01,
                                        R=tensor)
        X.append(rg)
        R_hat1 = tl.unfold(F_res, 0)
        Y.append(calc_unobserved_rmse2(R1, R_hat1, tl.unfold(1 - mask, 0)))
        Y2.append(calc_unobserved_rmse2(T1, R_hat1, tl.unfold(1 - mask, 0)))
    sb.lineplot(X, Y)
    sb.lineplot(X, Y2)
    plt.show()
    exit(0)
Example #28
def fuc():
    Image, X, known, a, Mi, Yi, imSize, ArrSize, p, K = init()
    for k in range(K):
        # compute Mi tensors(Step1)
        for i in range(ArrSize[3]):
            temp1 = shrinkage(
                tl.unfold(X, mode=i) +
                tl.unfold(np.squeeze(Yi[:, :, :, i]), mode=i) / p, a[i] / p)
            temp = tl.fold(temp1, i, imSize)
            Mi[:, :, :, i] = temp
        # Update X (Step 2): average the Mi estimates over the mode axis.
        X = np.sum(Mi - Yi / p, axis=3) / ArrSize[3]
        X = ReplaceInd(X, known, Image)
        # Update Yi tensors (Step 3)
        for i in range(ArrSize[3]):
            Yi[:, :, :, i] = np.squeeze(
                Yi[:, :, :, i]) - p * (np.squeeze(Mi[:, :, :, i]) - X)
        # Modify rho to help convergence(Step 4)
        p = 1.2 * p
    return X
Example #29
def one_ntd_step_mu(tensor, ranks, in_core, in_factors, beta, norm_tensor,
                    fixed_modes, normalize, mode_core_norm):
    """
    One step of Multiplicative Update applied to every mode of the tensor
    and to the core.
    """
    # Copy
    core = in_core.copy()
    factors = in_factors.copy()

    # Generating the mode update sequence
    modes_list = [mode for mode in range(tl.ndim(tensor)) if mode not in fixed_modes]
    
    for mode in modes_list:
        factors[mode] = mu.mu_betadivmin(
            factors[mode],
            tl.unfold(tl.tenalg.multi_mode_dot(core, factors, skip=mode), mode),
            tl.unfold(tensor, mode), beta)

    core = mu.mu_tensorial(core, factors, tensor, beta)

    if normalize[-1]:
        unfolded_core = tl.unfold(core, mode_core_norm)
        for idx_mat in range(unfolded_core.shape[0]):
            if tl.norm(unfolded_core[idx_mat]) != 0:
                unfolded_core[idx_mat] = unfolded_core[idx_mat] / tl.norm(unfolded_core[idx_mat], 2)
        core = tl.fold(unfolded_core, mode_core_norm, core.shape)

    # # Adding the l1 norm value to the reconstruction error
    # sparsity_error = 0
    # for index, sparse in enumerate(sparsity_coefficients):
    #     if sparse:
    #         if index < len(factors):
    #             sparsity_error += 2 * (sparse * np.linalg.norm(factors[index], ord=1))
    #         elif index == len(factors):
    #             sparsity_error += 2 * (sparse * tl.norm(core, 1))
    #         else:
    #             raise NotImplementedError("TODEBUG: Too many sparsity coefficients, should have been raised before.")

    reconstructed_tensor = tl.tenalg.multi_mode_dot(core, factors)

    cost_fct_val = beta_div.beta_divergence(tensor, reconstructed_tensor, beta)
    
    return core, factors, cost_fct_val
Example #30
def mach_td(X, rank, p):
    X_s = np.zeros(X.shape).flatten()

    # Keep each entry with probability p, rescaling by 1/p to stay unbiased.
    for idx, v in enumerate(X.flatten()):
        coinToss = random.uniform(0, 1)
        if coinToss <= p:
            X_s[idx] = v / p

    X_s = X_s.reshape(X.shape)
    factors = []
    for i in range(X.ndim):
        if rank[i] < X.shape[i]:
            A_s = sa.csr_matrix(tensorly.unfold(X_s, i))
            factors.append(sla.svds(A_s, k=rank[i], return_singular_vectors='u')[0])
        else:
            U, _, _ = la.svd(tensorly.unfold(X_s, i), full_matrices=False)
            factors.append(U)

    core = ta.multi_mode_dot(X_s, factors, transpose=True)
    return core, factors
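A usage sketch, assuming the aliases used in the snippet (sa = scipy.sparse, sla = scipy.sparse.linalg, la = numpy.linalg, ta = tensorly.tenalg): sample roughly half of the entries, then Tucker-decompose the rescaled sparse tensor.

import random
import numpy as np

X = np.random.rand(10, 10, 10)
core, factors = mach_td(X, rank=[3, 3, 3], p=0.5)
print(core.shape, [f.shape for f in factors])  # (3, 3, 3) and three (10, 3) factors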