Example #1
File: facets.py  Project: kokikwbt/facets
def _e_step(Xt, Wt, T, L, N, U, B, z0, psi0, sgm0, sgmO, sgmR, sgmV):
    # matricize U and B
    matU = kronecker(U, reverse=True)
    matB = kronecker(B, reverse=True)
    # workspace
    Lp = np.prod(L)
    Np = np.prod(N)
    P = np.zeros((T, Lp, Lp))
    J = np.zeros((T, Lp, Lp))
    mu_ = np.zeros((T, Lp))
    psi = np.zeros((T, Lp, Lp))
    mu_h = np.zeros((T, Lp))
    psih = np.zeros((T, Lp, Lp))
    # forward algorithm
    for t in trange(T, desc='forward'):
        ot = tensor_to_vec(
            Wt[t])  # boolean mask of the observed entries of the tensor X[t]
        xt = tensor_to_vec(Xt[t])[ot]
        lt = sum(ot)  # number of observed entries at time t
        Ht = matU[ot, :]
        if t == 0:
            K = sgm0 * Ht.T @ pinv(sgm0 * Ht @ Ht.T + sgmR * np.eye(lt))
            # K = psi0 @ Ht.T @ pinv(Ht @ psi0 @ Ht.T + sgmR * np.eye(lt))
            # psi[0] = sgm0 * np.eye(Lp) - K[0] @ Ht
            psi[0] = sgm0 * (np.eye(Lp) - K @ Ht)
            # psi[0] = (np.eye(Lp) - K @ Ht) @ psi0
            mu_[0] = z0 + K @ (xt - Ht @ z0)
        else:
            P[t - 1] = matB @ psi[t - 1] @ matB.T + sgmO * np.eye(Lp)
            K = P[t - 1] @ Ht.T @ pinv(Ht @ P[t - 1] @ Ht.T +
                                       sgmR * np.eye(lt))
            mu_[t] = matB @ mu_[t - 1] + K @ (xt - Ht @ matB @ mu_[t - 1])
            psi[t] = (np.eye(Lp) - K @ Ht) @ P[t - 1]

    # backward
    mu_h[-1] = mu_[-1]
    psih[-1] = psi[-1]
    for t in tqdm(list(reversed(range(T - 1))), desc='backward'):
        J[t] = psi[t] @ matB.T @ pinv(P[t])
        mu_h[t] = mu_[t] + J[t] @ (mu_h[t + 1] - matB @ mu_[t])
        psih[t] = psi[t] + J[t] @ (psih[t + 1] - P[t]) @ J[t].T

    # compute expectations
    ztt = np.zeros((T, Lp, Lp))
    zt_ = np.zeros((T, Lp, Lp))
    cov_zt_ = np.zeros((T, Lp, Lp))
    for t in trange(T, desc='compute expectations'):
        if t > 0:
            cov_zt_[t] = psih[t] @ J[t - 1].T
            zt_[t] = cov_zt_[t] + np.outer(mu_h[t], mu_h[t - 1])
        ztt[t] = psih[t] + np.outer(mu_h[t], mu_h[t])
    zt = mu_h
    cov_ztt = psih
    return zt, cov_ztt, cov_zt_, ztt, zt_
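For reference, the forward and backward passes above are the standard Kalman filter followed by a Rauch-Tung-Striebel (RTS) smoother, specialized to the matricized FACETS model (missing entries are handled by masking rows of matU). Below is a minimal, self-contained sketch of the same recursions for a fully observed linear-Gaussian model; the names mirror the function above but are purely illustrative and not part of the project.

import numpy as np

def rts_smoother(x, H, B, z0, sgm0, sgmO, sgmR):
    """Kalman filter + RTS smoother for z_t = B z_{t-1} + w_t, x_t = H z_t + v_t."""
    T, Lp = len(x), len(z0)
    mu = np.zeros((T, Lp))
    psi = np.zeros((T, Lp, Lp))
    P = np.zeros((T, Lp, Lp))  # P[t] = Cov(z_{t+1} | x_{1..t}), same indexing as above
    # forward pass (filter)
    for t in range(T):
        if t == 0:
            z_pred, P_pred = z0, sgm0 * np.eye(Lp)
        else:
            z_pred = B @ mu[t - 1]
            P_pred = B @ psi[t - 1] @ B.T + sgmO * np.eye(Lp)
            P[t - 1] = P_pred
        K = P_pred @ H.T @ np.linalg.pinv(H @ P_pred @ H.T + sgmR * np.eye(H.shape[0]))
        mu[t] = z_pred + K @ (x[t] - H @ z_pred)
        psi[t] = (np.eye(Lp) - K @ H) @ P_pred
    # backward pass (smoother)
    mu_h, psi_h = mu.copy(), psi.copy()
    for t in reversed(range(T - 1)):
        J = psi[t] @ B.T @ np.linalg.pinv(P[t])
        mu_h[t] = mu[t] + J @ (mu_h[t + 1] - B @ mu[t])
        psi_h[t] = psi[t] + J @ (psi_h[t + 1] - P[t]) @ J.T
    return mu_h, psi_h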
Example #2
    def predict(self, X, Y):
        """Compute the HOPLS for X and Y wrt the parameters R, Ln and Km.

        Parameters:
            X: tensorly Tensor, The tensor we wish to do a prediction from.
            Of shape [i1, ... iN], N >= 3.

            Y: tensorly Tensor, used only for the shape of the prediction.

        Returns:
            Y_pred: tensorly Tensor, The predicted Y from the model.
        """
        _, Q, D, _, W = self.model
        best_q2 = +np.inf
        if len(Y.shape) > 2:
            Q_star = []
            for r in range(self.R):
                Qkron = kronecker([Q[r][self.M - m - 1] for m in range(self.M)])
                Q_star.append(torch.mm(matricize(D[r][np.newaxis, ...]), Qkron.t()))
            Q_star = torch.cat(Q_star)
        q2s = []
        for r in range(1, self.R + 1):
            if len(Y.shape) == 2:
                Q_star = torch.mm(D[:r, :r], Q[:, :r].t())
            inter = torch.mm(W[:, :r], Q_star[:r])
            Y_pred = np.reshape(torch.mm(matricize(X), inter), Y.shape, order="F")
            Q2 = metric_nmse(Y.numpy(), Y_pred.numpy())
            q2s.append(Q2)
            if Q2 < best_q2:
                best_q2 = Q2
                best_r = r
                best_Y_pred = Y_pred

        return best_Y_pred, best_r, q2s
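The predict snippet above relies on a metric_nmse helper that is not shown in the excerpt. Under the usual definition of the normalized mean squared error (lower is better, which is why the loop keeps the minimum), a minimal sketch could look like the following; the project's actual implementation may differ.

import numpy as np

def metric_nmse(Y, Y_pred):
    # normalized mean squared error: ||Y - Y_pred||^2 / ||Y||^2 (lower is better)
    Y, Y_pred = np.asarray(Y, dtype=float), np.asarray(Y_pred, dtype=float)
    return float(np.sum((Y - Y_pred) ** 2) / np.sum(Y ** 2))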
Example #3
File: facets.py  Project: kokikwbt/facets
def _m_step(Xt, Wt, T, S, L, N, U, B, z0, sgm0, sgmO, sgmR, sgmV, xi, _lambda,
            Ev, Evv, Ez, Ezzt, Ezz_, mode):
    """
    Eq. (12)
    """
    Umat = kronecker(U, reverse=True)
    Bmat = kronecker(B, reverse=True)
    Lm = np.prod(L)

    z0_new = deepcopy(Ez[0])
    psi0_new = Ezzt[0] - np.outer(Ez[0], Ez[0])
    sgm0_new = np.trace(Ezzt[0] - np.outer(Ez[0], Ez[0])) / Lm
    if sgm0_new < 1.e-7:
        sgm0_new = 1.e-7

    res = np.trace(
        sum(Ezzt[1:]) - sum(Ezz_[1:]) @ Bmat.T - (sum(Ezz_[1:]) @ Bmat.T).T +
        Bmat @ sum(Ezzt[:-1]) @ Bmat.T)
    sgmO_new = res / ((T - 1) * Lm)

    res = 0
    for t in range(T):
        Wvec = Wt[t].reshape(-1)
        Xvec = Xt[t].reshape(-1)[Wvec]
        # Wvec = tensor_to_vec(Wt[t])
        # Xvec = tensor_to_vec(Xt[t])[Wvec]
        Uobs = Umat[Wvec, :]
        res += np.trace(Uobs @ Ezzt[t] @ Uobs.T)
        res += Xvec @ Xvec - 2 * Xvec @ (Uobs @ Ez[t])
    print(res, Wt.sum())
    sgmR_new = res / Wt.sum()

    if _lambda > 0:
        sgmV_new = sum([np.trace(Evv[mode][j]) for j in range(N[mode])])
        sgmV_new /= (N[mode] * L[mode])
        xi_new = sum([
            S[:, j] @ S[:, j] - 2 * S[:, j] @ U[mode] @ Ev[mode][j]
            for j in range(N[mode])
        ])
        xi_new += np.trace(U[mode] @ sum(Evv[mode]) @ U[mode].T)
        xi_new /= N[mode]**2
    else:
        sgmV_new = sgmV[mode]
        xi_new = xi[mode]

    return z0_new, psi0_new, sgm0_new, sgmO_new, sgmR_new, sgmV_new, xi_new
Example #4
File: facets.py  Project: kokikwbt/facets
def update_observation_tensor(mode, Xt, Wt, T, S, L, M, N, U, _lambda, EZ, Ev,
                              Evv, cov_ZZt, z0, sgm0, sgmO, sgmR, sgmV, xi):
    """
    Eq. (19), (20)
    """
    G = kronecker(U, skip_matrix=mode, reverse=True).T
    for i in trange(N[mode], desc=f"update U[{mode}]"):
        A_11, A_12, A_21, A_22 = _compute_A(_lambda, mode, i, G, Xt, Wt, T, S,
                                            L, M, N, EZ, Ev, Evv, cov_ZZt)
        numer = _lambda * A_11 / xi + (1 - _lambda) * A_12 / sgmR
        denom = _lambda * A_21 / xi + (1 - _lambda) * A_22 / sgmR
        U[mode][i, :] = numer @ pinv(denom)
    return U[mode]
Example #5
def Tensor_MM_kron(Tx, modej, U_list):
    """
    Calculate X_(j) (U^(-j))^T: the mode-j unfolding of Tx multiplied by the
    transposed Kronecker product of all projection matrices except U_list[modej].
    Tx: input tensor
    modej: index of mode j; modes are numbered 0, 1, ..., d - 1
    U_list: a list of projection matrices, length d
    """
    U_temp = U_list[:]
    U_temp.pop(modej)  # drop U_list[modej] first, then reverse and take the Kronecker product
    U_temp = U_temp[::-1]
    res = kronecker(U_temp)
    les = tl.unfold(Tx, mode=modej)
    ans = np.dot(les, res.T)
    return ans
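A quick shape-level usage sketch, assuming NumPy and tensorly are available; the tensor and projection matrices below are random placeholders chosen only to make the shapes visible.

import numpy as np
import tensorly as tl
from tensorly.tenalg import kronecker

X = np.random.rand(4, 5, 6)  # a 3-way input tensor
U_list = [np.random.rand(2, 4), np.random.rand(3, 5), np.random.rand(2, 6)]
out = Tensor_MM_kron(X, modej=1, U_list=U_list)
print(out.shape)  # (5, 4): X_(1) is (5, 24) and kron(U_list[2], U_list[0]).T is (24, 4)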
Example #6
File: facets.py  Project: kokikwbt/facets
def update_transition_tensor(mode, L, B, covZZt, covZZ_, EZ):
    T = len(covZZt)
    F = kronecker(B, skip_matrix=mode, reverse=True).T
    Lm = L[mode]
    Ln = int(np.prod(L) / Lm)
    C1 = np.zeros((Lm, Lm))
    C2 = np.zeros((Lm, Lm))
    for t in trange(1, T, desc=f'update B[{mode}]'):
        for j in range(Ln):
            C1 += _compute_b(F, covZZt[t - 1], j)  # t = 1..T-1
            C1 += np.outer(EZ[t - 1] @ F[:, j], EZ[t - 1] @ F[:, j])
            C2 += _compute_a(F, covZZ_[t], j)  # t = 2..T
            C2 += np.outer(EZ[t, :, j], EZ[t - 1] @ F[:, j])
    return C2 @ pinv(C1)
Example #7
    def predict(self, X, Y):
        """Compute the HOPLS for X and Y wrt the parameters R, Ln and Km.

        Parameters:
            X: tensorly Tensor, The tensor we wish to do a prediction from.
            Of shape [i1, ... iN], N >= 3.

            Y: tensorly Tensor, used only for the shape of the prediction.

        Returns:
            Y_pred: tensorly Tensor, The predicted Y from the model.
        """
        _, Q, D, _, W = self.model
        best_q2 = -np.inf
        if len(Y.shape) > 2:
            Q_star = []
            for r in range(self.R):
                Qkron = kronecker(
                    [Q[r][self.M - m - 1] for m in range(self.M)])
                Q_star.append(
                    np.matmul(matricize(D[r][np.newaxis, ...]), Qkron.T))
            Q_star = np.concatenate(Q_star)
        for r in range(1, self.R + 1):
            if len(Y.shape) == 2:
                Q_star = np.matmul(D[:r, :r], Q[:, :r].T)
            inter = np.matmul(W[:, :r], Q_star[:r])
            Y_pred = np.reshape(np.matmul(matricize(X), inter),
                                Y.shape,
                                order="F")
            Q2 = qsquared(Y, Y_pred)
            if Q2 > best_q2:
                best_q2 = Q2
                best_r = r
                best_Y_pred = Y_pred

        return best_Y_pred, best_r, best_q2
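Unlike Example #2, this variant selects the number of components by maximizing Q2 instead of minimizing the NMSE. The qsquared helper is not shown in the excerpt; a minimal sketch of the usual Q2 definition is given below, and the project's actual implementation may differ.

import numpy as np

def qsquared(Y, Y_pred):
    # Q^2 = 1 - ||Y - Y_pred||^2 / ||Y||^2 (higher is better)
    Y, Y_pred = np.asarray(Y, dtype=float), np.asarray(Y_pred, dtype=float)
    return float(1.0 - np.sum((Y - Y_pred) ** 2) / np.sum(Y ** 2))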
Example #8
File: recon.py  Project: kokikwbt/facets
T = X.shape[-1]
M = X.ndim - 1
z = np.loadtxt(outdir + "vec_z.txt")
U = [np.loadtxt(outdir + f"U_{i}.txt") for i in range(M)]
ranks = [U[i].shape[1] for i in range(M)]
Z = np.zeros((T, *ranks))
for t in range(T):
    Z[t] = z[t].reshape(ranks)

plt.plot(z)
plt.show()
for m in range(M):
    pred = np.zeros((T, X.shape[m]))
    for t in range(T):
        print(unfold(Z[t], m).shape, U[m].shape)
        print(kronecker(U, skip_matrix=m, reverse=True).T.shape)
        X_n = U[m] @ unfold(Z[t], m) @ kronecker(
            U, skip_matrix=m, reverse=True).T
        pred[t] = X_n[:, m]
    # plt.plot((X, -1)[:, i])
    plt.plot(pred)
    plt.show()

matU = kronecker(U, reverse=True)
predict = z @ matU.T
print(predict.shape)
print(unfold(X, -1).shape)
for i in range(unfold(X, -1).shape[1]):
    plt.plot(unfold(X, -1)[:, i])
    plt.plot(predict[:, i])
    plt.show()
Example #9
    def _fit_2d(self, X, Y):
        """
        Compute the HOPLS for X and Y wrt the parameters R, Ln and Km for the special case mode_Y = 2.

        Parameters:
            X: tensorly Tensor, the predictor tensor of shape [i1, ..., iN], N >= 3.

            Y: tensorly Tensor, the response matrix of shape [i1, j2] (the mode_Y = 2 case).

        Returns:
            self, with self.model set to (P, Q, D, T, W): the X loadings P, the
            Y loadings Q, the (R, R) diagonal core D, the latent vectors T of
            shape (i1, R), and the prediction weights W.
        """

        # Initialization
        Er, Fr = X, Y
        P, T, W, Q = [], [], [], []
        D = tl.zeros((self.R, self.R))
        G = []

        # Beginning of the algorithm
        # Gr, _ = tucker(Er, ranks=[1] + self.Ln)
        for r in range(self.R):
            if torch.norm(Er) > self.epsilon and torch.norm(Fr) > self.epsilon:
                # computing the covariance
                Cr = mode_dot(Er, Fr.t(), 0)

                # HOOI tucker decomposition of C
                Gr_C, latents = tucker(Cr, rank=[1] + self.Ln)

                # Getting P and Q loadings
                qr = latents[0]
                qr /= torch.norm(qr)
                # Pr = latents[1:]
                Pr = [a / torch.norm(a) for a in latents[1:]]
                P.append(Pr)
                tr = multi_mode_dot(Er, Pr, list(range(1, len(Pr) + 1)), transpose=True)
                # Gr_pi = torch.pinverse(matricize(Gr))
                # tr = torch.mm(matricize(tr), Gr_pi)
                GrC_pi = torch.pinverse(matricize(Gr_C))
                tr = torch.mm(matricize(tr), GrC_pi)
                tr /= torch.norm(tr)

                # recomposition of the core tensor of Y
                ur = torch.mm(Fr, qr)
                dr = torch.mm(ur.t(), tr)

                D[r, r] = dr
                Pkron = kronecker([Pr[self.N - n - 1] for n in range(self.N)])
                # P.append(torch.mm(matricize(Gr), Pkron.t()).t())
                # W.append(torch.mm(Pkron, Gr_pi))
                Q.append(qr)
                T.append(tr)
                Gd = tl.tucker_to_tensor([Er, [tr] + Pr], transpose_factors=True)
                Gd_pi = torch.pinverse(matricize(Gd))
                W.append(torch.mm(Pkron, Gd_pi))

                # Deflation
                # X_hat = torch.mm(torch.cat(T, dim=1), torch.cat(P, dim=1).t())
                # Er = X - np.reshape(X_hat, (Er.shape), order="F")
                Er = Er - tl.tucker_to_tensor([Gd, [tr] + Pr])
                Fr = Fr - dr * torch.mm(tr, qr.t())
            else:
                break

        Q = torch.cat(Q, dim=1)
        T = torch.cat(T, dim=1)
        # P = torch.cat(P, dim=1)
        W = torch.cat(W, dim=1)

        self.model = (P, Q, D, T, W)
        return self
Example #10
    def fit(self, X, Y):
        """
        Compute the HOPLS for X and Y wrt the parameters R, Ln and Km.

        Parameters:
            X: tensorly Tensor, The target tensor of shape [i1, ..., iN], N >= 3.

            Y: tensorly Tensor, The target tensor of shape [j1, ..., jM], M >= 2
            (the case M = 2 is delegated to _fit_2d).

        Returns:
            self, with self.model set to (P, Q, D, T, W): the X loadings P, the
            Y loadings Q, the list of Y core tensors D, the latent vectors T of
            shape (i1, R), and the prediction weights W.
        """
        # check parameters
        X_mode = len(X.shape)
        Y_mode = len(Y.shape)
        assert Y_mode >= 2, "Y must have at least 2 modes."
        assert X_mode >= 3, "X must have at least 3 modes."
        assert (
            len(self.Ln) == X_mode - 1
        ), f"The number of ranks for the decomposition of X (Ln) must equal the order of X minus 1: {X_mode-1}."
        if Y_mode == 2:
            return self._fit_2d(X, Y)
        assert (
            len(self.Km) == Y_mode - 1
        ), f"The number of ranks for the decomposition of Y (Km) must equal the order of Y minus 1: {Y_mode-1}."

        # Initialization
        Er, Fr = X, Y
        In = X.shape
        T, G, P, Q, D, W = [], [], [], [], [], []

        # Beginning of the algorithm
        for r in range(self.R):
            if torch.norm(Er) > self.epsilon and torch.norm(Fr) > self.epsilon:
                Cr = torch.Tensor(np.tensordot(Er, Fr, (0, 0)))
                # HOOI tucker decomposition of C
                _, latents = tucker(Cr, ranks=self.Ln + self.Km)

                # Getting P and Q loadings
                Pr = latents[: len(Er.shape) - 1]
                Qr = latents[len(Er.shape) - 1 :]

                # computing product of Er by latents of X
                tr = multi_mode_dot(Er, Pr, list(range(1, len(Pr) + 1)), transpose=True)

                # Getting t as the first leading left singular vector of the product
                tr = torch.svd(matricize(tr))[0][:, 0]
                tr = tr[..., np.newaxis]

                # recomposition of the core tensors
                Gr = tl.tucker_to_tensor(Er, [tr] + Pr, transpose_factors=True)
                Dr = tl.tucker_to_tensor(Fr, [tr] + Qr, transpose_factors=True)
                Pkron = kronecker([Pr[self.N - n - 1] for n in range(self.N)])
                Gr_pi = torch.pinverse(matricize(Gr))
                W.append(torch.mm(Pkron, Gr_pi))

                # Gathering of
                P.append(Pr)
                Q.append(Qr)
                G.append(Gr)
                D.append(Dr)
                T.append(tr)

                # Deflation
                Er = Er - tl.tucker_to_tensor(Gr, [tr] + Pr)
                Fr = Fr - tl.tucker_to_tensor(Dr, [tr] + Qr)
            else:
                break

        T = torch.cat(T, dim=1)
        W = torch.cat(W, dim=1)
        self.model = (P, Q, D, T, W)
        return self
Example #11
def compose_tensor(n, vec):
    vecs = n_unit_norm_vecs(n, vec.shape[0])
    print(type(vecs))
    print(vecs)
    # np.array(vecs, vecs) is not valid NumPy (the second argument is a dtype);
    # the Kronecker product of the generated vectors appears to be the intent.
    return kronecker(vecs)
Example #12
File: facets.py  Project: kokikwbt/facets
def reconstruct_matrix(U, Z, mode):
    # Lemma 3.2
    index = np.ones(len(U), dtype=bool)
    index[mode] = False
    others = [u for u, keep in zip(U, index) if keep]
    return U[mode] @ Z @ kronecker(others[::-1]).T
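A shape-level usage sketch with random placeholder factors, assuming NumPy and the same tensorly kronecker import used by the snippets above.

import numpy as np
from tensorly.tenalg import kronecker

U = [np.random.rand(6, 2), np.random.rand(7, 3), np.random.rand(8, 4)]  # Tucker factors
Z = np.random.rand(3, 2 * 4)  # mode-1 unfolding of a (2, 3, 4) core tensor
X1 = reconstruct_matrix(U, Z, mode=1)
print(X1.shape)  # (7, 48): the mode-1 unfolding of the reconstructed (6, 7, 8) tensor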