Example #1
    def forward(self, X, transposed=False):

        # First-order term: bias + X @ W1, with W1 stored either densely
        # or in Tucker form and reconstructed on the fly.
        if self.rank_tucker == -1:
            result = torch.addmm(self.bias, X, self.order1_tens)
        else:
            result = torch.addmm(self.bias, X,
                                 tucker_to_tensor(self.order1_tens))

        if self.order >= 2:
            # Second-order term: contract X into the first two modes of the
            # (d, d, k) tensor, then keep only each row paired with itself.
            if self.rank_tucker == -1:
                acc = tl.tenalg.mode_dot(self.order2_tens, X, 0)
            else:
                acc = tl.tenalg.mode_dot(tucker_to_tensor(self.order2_tens), X,
                                         0)

            acc = tl.tenalg.mode_dot(acc, X, 1)
            result += torch.einsum('iik->ik', acc)

        if self.order == 3:
            # Third-order term: same pattern, with three mode products and a
            # triple-diagonal contraction.
            if self.rank_tucker == -1:
                acc = tl.tenalg.mode_dot(self.order3_tens, X, 0)
            else:
                acc = tl.tenalg.mode_dot(tucker_to_tensor(self.order3_tens), X,
                                         0)

            acc = tl.tenalg.mode_dot(acc, X, 1)
            acc = tl.tenalg.mode_dot(acc, X, 2)
            result += torch.einsum('iiik->ik', acc)

        return tl.reshape(result, (X.shape[0], self.output_dim))
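The order-2 branch above is a batched quadratic form. A minimal standalone sketch (shapes n, d, k are illustrative, not from the original) showing that the two mode_dot calls plus the diagonal einsum match a direct contraction:

import torch
import tensorly as tl
tl.set_backend('pytorch')

n, d, k = 4, 5, 3
X = torch.randn(n, d)
W = torch.randn(d, d, k)               # stands in for self.order2_tens

acc = tl.tenalg.mode_dot(W, X, 0)      # (n, d, k)
acc = tl.tenalg.mode_dot(acc, X, 1)    # (n, n, k): every row pair (i, j)
quad = torch.einsum('iik->ik', acc)    # keep only the pairs (i, i)

ref = torch.einsum('id,ie,dek->ik', X, X, W)   # direct quadratic form
assert torch.allclose(quad, ref, atol=1e-4)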
Example #2
def impute(tensor_with_nan, laplacians, ranks, alpha, regular, tol, interval):
    # initial guess: replace every NaN with the global mean of the observed entries
    fill_value = np.nanmean(tensor_with_nan)

    tensor = tensor_with_nan.copy()

    for index, e in np.ndenumerate(tensor):
        if np.isnan(e):
            tensor[index] = fill_value

    factors = None

    while True:
        # restore the observed entries before refitting
        for index, tr in np.ndenumerate(tensor_with_nan):
            if not np.isnan(tr):
                tensor[index] = tr

        # project-specific graph-Laplacian-regularized Tucker (not the stock
        # tensorly signature), warm-started with the previous factors
        core, factors = tucker(tensor, ranks, laplacians, n_iter_max=interval,
                               alpha=alpha, tol=None, regular=regular,
                               factors=factors)

        tensor_tmp = tucker_to_tensor((core, factors))
        # relative change between the reconstruction and the current estimate
        variation = np.sum(np.power(tensor_tmp - tensor, 2)) / np.sum(np.power(tensor, 2))
        print(variation)
        if variation < tol:
            break

        tensor = tensor_tmp

    return tensor
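The tucker called above is a graph-Laplacian-regularized variant specific to the surrounding project, not the stock tensorly routine. The same fill-then-refit loop with plain tensorly would look roughly like this (a sketch under that simplification; simple_tucker_impute is a hypothetical name):

import numpy as np
import tensorly as tl
from tensorly.decomposition import tucker
from tensorly import tucker_to_tensor

def simple_tucker_impute(tensor_with_nan, rank, tol=1e-4, max_rounds=50):
    mask = ~np.isnan(tensor_with_nan)
    # start from the mean-filled tensor
    tensor = np.where(mask, tensor_with_nan, np.nanmean(tensor_with_nan))
    for _ in range(max_rounds):
        core, factors = tucker(tl.tensor(tensor), rank=rank)
        approx = tl.to_numpy(tucker_to_tensor((core, factors)))
        variation = np.sum((approx - tensor) ** 2) / np.sum(tensor ** 2)
        # keep observed entries, let the low-rank fit fill the rest
        tensor = np.where(mask, tensor_with_nan, approx)
        if variation < tol:
            break
    return tensor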
Example #3
    def reconstruct(self):
        """
        Reconstructs the decomposed TensorDecomp object.
        Assigns the reconstructed tensor to self.recons attribute.

        Parameters
        ----------
        self : object of class TensorDecomp type.
            
        Returns
        -------
        None            
        
        """

        if self.decomp_type == 'svd':
            self.recons = self.decomposed[0] @ (
                np.diag(self.decomposed[1]) @ self.decomposed[2])

        elif self.decomp_type == 'NMF':
            self.recons = self.nmf_obj.inverse_transform(self.decomposed[0])

        elif self.decomp_type == 'tucker':
            from tensorly import tucker_tensor as tt
            self.recons = tt.tucker_to_tensor(self.decomposed)

        elif self.decomp_type == 'parafac':
            from tensorly import cp_tensor as ct
            self.recons = ct.cp_to_tensor(self.decomposed)

        elif self.decomp_type == 'matrix_product_state':
            from tensorly import tt_tensor as tt
            self.recons = tt.tt_to_tensor(self.decomposed)
        elif self.decomp_type == 'clarkson_woodruff_transform':
            self.recons = self.decomposed
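A hypothetical round trip for the 'tucker' branch, checking that the reconstruction inverts the decomposition up to the chosen rank (shapes and rank are illustrative):

import numpy as np
import tensorly as tl
from tensorly.decomposition import tucker
from tensorly import tucker_tensor as tt

X = np.random.rand(8, 8, 8)
decomposed = tucker(tl.tensor(X), rank=[4, 4, 4])      # (core, factors)
recons = tt.tucker_to_tensor(decomposed)
print(np.linalg.norm(recons - X) / np.linalg.norm(X))  # relative error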
Example #4
    def recover(self):
        """
        Recover the original shape.
        """
        core = self.core_layer.weight.data
        out_factor = self.out_channel_layer.weight.data.squeeze()
        in_factor = self.in_channel_layer.weight.data.squeeze()
        in_factor = torch.transpose(in_factor, 1, 0)
        # older tensorly API; recent versions expect a (core, factors) tuple
        return tucker.tucker_to_tensor(core, [out_factor, in_factor])
Example #5
    def em(self, max_iter=10, tol=1.e-7, init=False):
        if init: self._initialize_parameters()
        # initialize the expectations of V(m)
        Ev, Evv = _compute_context_expectation(self.L, self.M, self.N, self.S,
                                               self.U, self.xi, self.sgmV,
                                               self._lambda)
        # start EM algorithm
        for it in range(max_iter):
            print('================')
            print(' iter', it + 1)
            print('================')
            try:
                (Ev, Evv, Ez, self.U, self.B, self.z0, self.psi0, self.sgm0,
                 self.sgmO, self.sgmR, self.sgmV,
                 self.xi) = _em(self.X, self.W, self.T, self.S, self.L, self.M,
                                self.N, self._lambda, Ev, Evv, self.U, self.B,
                                self.z0, self.psi0, self.sgm0, self.sgmO,
                                self.sgmR, self.sgmV, self.xi)
            except KeyboardInterrupt:
                # note: Ez is unbound if the very first _em call is interrupted
                self.z = Ez
                print(self.z[:3])
                self.save_params()
                break

            self.llh.append(self.compute_log_likelihood())
            # if abs(self.llh[-1] - self.llh[-2]) < tol:
            #     print("converged!!")
            #     break
            print("log-likelihood=", self.llh[-1])

            self.z0_log.append(self.z0)
            self.sgm0_log.append(self.sgm0)
            self.sgmO_log.append(self.sgmO)
            self.sgmR_log.append(self.sgmR)
            self.sgmV_log.append(deepcopy(self.sgmV))
            self.xi_log.append(deepcopy(self.xi))

        self.z = Ez
        # reconstruct each time step from its core and the shared factors
        # (older positional tucker_to_tensor API)
        self.recon_ = recon_ = np.array([
            tucker_to_tensor(Ez[t].reshape(*self.L), self.U)
            for t in range(self.T)
        ])
        print(self.recon_.shape)
        self.recon_ = np.moveaxis(recon_, -1, 1)
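The final reconstruction step, isolated with plain tensorly (illustrative shapes; the method above uses the older positional tucker_to_tensor(core, factors) call, while the tuple form below is the current API):

import numpy as np
from tensorly import tucker_to_tensor

T, L = 5, (3, 4)
U = [np.random.rand(10, 3), np.random.rand(8, 4)]   # shared factor matrices
Ez = np.random.rand(T, L[0] * L[1])                 # one flattened core per step
recon = np.array([tucker_to_tensor((Ez[t].reshape(*L), U)) for t in range(T)])
print(recon.shape)  # (5, 10, 8)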
Example #6
def representive_connMatrix_tucker(conn_seg):
    """Extract one representative connectivity matrix from a series of
    connectivity matrices for a time segment, based on Tucker decomposition.

    @param conn_seg: a series of connectivity matrices for one time segment
        (n_chns * n_chns * n_times)

    @return conn_repre: one representative connectivity matrix for this
        time segment (n_chns * n_chns)
    """
    rank = [15, 15, 3]
    # older tensorly API: newer versions use rank= and take a (core, factors)
    # tuple in tucker_to_tensor
    core, factors = tucker(conn_seg, ranks=rank)
    recon_conn_seg = tucker_to_tensor(core, factors)  # e.g. 64 * 64 * 60
    # average over time: one connectivity-matrix summary per segment
    conn_repre = np.mean(recon_conn_seg, axis=2)

    return conn_repre
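A hypothetical usage sketch; the hard-coded ranks [15, 15, 3] assume the 64 x 64 x 60 shape mentioned in the comments:

import numpy as np

conn_seg = np.random.rand(64, 64, 60)    # n_chns x n_chns x n_times
conn_repre = representive_connMatrix_tucker(conn_seg)
print(conn_repre.shape)                  # (64, 64)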
Example #7
    def recover(self):
        """
        Recover original tensor from decomposed tensor

        @return: 4D weight tensor with original layer's shape
        """

        # get core
        core = self.core_layer.weight.data

        # get factor
        out_factor = self.out_channel_layer.weight.data.squeeze()

        in_factor = self.in_channel_layer.weight.data.squeeze()
        in_factor = torch.transpose(in_factor, 1, 0)

        # recover (older tensorly API; recent versions expect a
        # (core, factors) tuple)
        recovered = tucker_to_tensor(core, [out_factor, in_factor])

        return recovered
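What the factor shapes imply, sketched with the current tensorly API (all shapes are illustrative): a (C_out, C_in, kH, kW) convolution weight recovered from a channel-mode Tucker-2 factorization. multi_mode_dot makes the partial contraction over modes 0 and 1 explicit:

import torch
import tensorly as tl
tl.set_backend('pytorch')

core = torch.randn(8, 4, 3, 3)     # (r_out, r_in, kH, kW)
out_factor = torch.randn(16, 8)    # (C_out, r_out)
in_factor = torch.randn(32, 4)     # (C_in, r_in)

recovered = tl.tenalg.multi_mode_dot(core, [out_factor, in_factor],
                                     modes=[0, 1])
print(recovered.shape)             # (16, 32, 3, 3)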
Example #8
matU = kronecker(U, reverse=True)  # combined factor matrix across all modes
predict = z @ matU.T
print(predict.shape)
Xflat = unfold(X, -1)              # unfold along the last (time) mode
print(Xflat.shape)
# compare each unfolded series with its prediction
for i in range(Xflat.shape[1]):
    plt.plot(Xflat[:, i])
    plt.plot(predict[:, i])
    plt.show()
exit()

# print(Z.shape)
# Z = np.moveaxis(Z, 1,2)
# print(Z.shape)
# reconstruct each time slice from its core Z[t] and the shared factors U
# (older positional tucker_to_tensor API)
Xn = []
for t in range(T):
    Xn.append(tucker_to_tensor(Z[t], U))
Xn = np.array(Xn)
Xn = np.moveaxis(Xn, 0, -1)  # move time back to the last axis

# mode = 0
# for i in range(X.shape[0]):
#     for j in range(X.shape[1]):
#         plt.plot(X[i, j, mode, :].T)
#         plt.plot(Xn[i, j, mode, :].T)
#         plt.show()
# exit()
for i in range(X.shape[0]):
    for j in range(X.shape[1]):
        plt.plot(X[i, j, :].T)
        plt.plot(Xn[i, j, :].T)
        plt.show()
Example #9
        for i in range(self.T):
            # one streaming update per mode on the current time slice
            # (f: forgetting factor assumed bound in the enclosing scope)
            X = self.t[:, :, :, i]
            for j in range(3):
                self.ita(self.Ulist, self.Dlist, j, X, min(i + 1, 200), forget=f)

            # project onto the current factors, reconstruct, and record the error
            temp = tl.tenalg.multi_mode_dot(X, [u.T for u in self.Ulist])
            that = tl.tenalg.multi_mode_dot(temp, self.Ulist)
            emat = that - X

            self.reconerror.append(np.linalg.norm(emat) ** 2)
            self.error.append(self.reconerror[-1] / np.linalg.norm(X) ** 2)
            
            if print_error:
                print(self.reconerror[-1])
            
        self.meanerr = np.mean(self.error)

#%%
# synthetic test: a rank-[3, 3, 3] tensor replicated over T steps plus noise,
# with an extra perturbation on the first slice
m, T, r = 10, 100, 3
sigma = 0.1
np.random.seed(5)
Tensor = np.random.normal(size=(m, m, m))
core, factors = tucker(Tensor, rank=[3, 3, 3])
Tensor = tucker_to_tensor(core, factors)  # older positional API
Tensor = np.outer(Tensor, np.ones(T)).reshape((m, m, m, T))
Tensor += np.random.normal(scale=sigma, size=(m, m, m, T))
Tensor[:, :, :, 0] += np.random.normal(scale=10 * sigma, size=(m, m, m))
model = ita3d(Tensor, 3, 3, 3)
model.fit()