Example #1
    def __getitem__(self, indices):
        if isinstance(indices, int):
            # Select one dimension of one mode
            factor, next_factor, *factors = self.factors
            next_factor = tenalg.mode_dot(next_factor, factor[:, indices, :].squeeze(1), 0)
            return self.__class__([next_factor, *factors], self.tensorized_row_shape, 
                                      self.tensorized_column_shape, n_matrices=self.n_matrices[1:])
        
        elif isinstance(indices, slice):
            mixing_factor, *factors = self.factors
            factors = [mixing_factor[:, indices], *factors]
            return self.__class__(factors, self.tensorized_row_shape, 
                                      self.tensorized_column_shape, n_matrices=self.n_matrices[1:])

        else:
            factors = []
            all_contracted = True
            for i, index in enumerate(indices):
                if index is Ellipsis:
                    raise ValueError(f'Ellipsis is not yet supported, yet got indices={indices}, indices[{i}]={index}.')
                if isinstance(index, int):
                    if i:
                        factor = tenalg.mode_dot(factor, self.factors[i][:, index, :].T, -1)
                    else:
                        factor = self.factors[i][:, index, :]
                else:
                    if i:
                        if all_contracted:
                            factor = tenalg.mode_dot(self.factors[i][:, index, :], factor, 0)
                        else:
                            factors.append(factor)
                            factor = self.factors[i][:, index, :]
                    else:
                        factor = self.factors[i][:, index, :]
                    all_contracted = False

            # If all indexed cores were contracted with ints, factor is a 2D matrix
            if factor.ndim == 2:
                if self.order == (i+1):
                    # No factors left
                    return factor.squeeze()
                else:
                    next_factor, *factors = self.factors[i+1:]
                    factor = tenalg.mode_dot(next_factor, factor, 0)
                    return self.__class__([factor, *factors], self.tensorized_row_shape, 
                                      self.tensorized_column_shape,
                                      n_matrices=self.n_matrices[len(indices):])
            else:
                return self.__class__([*factors, factor, *self.factors[i+1:]], self.tensorized_row_shape, 
                                      self.tensorized_column_shape,
                                      n_matrices=self.n_matrices[len(indices):])
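For readers new to tensor-train indexing, here is a minimal standalone sketch (plain NumPy plus tensorly's tenalg; all shapes invented) of what the integer branch above does: slicing the first core at index i and absorbing it into the next core via mode_dot.

import numpy as np
from tensorly import tenalg

rng = np.random.default_rng(0)
G1 = rng.standard_normal((1, 4, 3))   # first TT core,  shape (r0=1, n1=4, r1=3)
G2 = rng.standard_normal((3, 5, 1))   # second TT core, shape (r1=3, n2=5, r2=1)

i = 2
# Contract the slice G1[:, i, :] (shape r0 x r1) into mode 0 of G2:
new_first_core = tenalg.mode_dot(G2, G1[:, i, :], 0)   # shape (r0, n2, r2)

# Check against the dense tensor: full[i1, i2] = G1[:, i1, :] @ G2[:, i2, :]
full = np.einsum('aib,bjc->ij', G1, G2)
assert np.allclose(new_first_core.squeeze(), full[i])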
Example #2
def Tensor_matrixproduct(X, listoffactors):  # The parameters are tensors
    # Computes the product of an N-order tensor with N matrices
    # X is of tensor type, and so are the matrices
    Res = tl.tensor(X)
    for mode, matrix in enumerate(listoffactors):
        Res = tenalg.mode_dot(Res, matrix, mode)
    return Res
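A quick usage sketch (hedged; it assumes tensorly imported as tl and tensorly.tenalg as tenalg, as the function implies): Tensor_matrixproduct(X, [A, B, C]) is the multilinear product X x1 A x2 B x3 C, which tensorly also offers directly as multi_mode_dot.

import numpy as np
import tensorly as tl
from tensorly import tenalg

X = tl.tensor(np.random.rand(2, 3, 4))
A, B, C = np.random.rand(5, 2), np.random.rand(6, 3), np.random.rand(7, 4)

Res = Tensor_matrixproduct(X, [A, B, C])   # shape (5, 6, 7)
assert np.allclose(Res, tenalg.multi_mode_dot(X, [A, B, C]))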
Example #3
def Tensor_matrixproduct(X, listoffactors):
    # Computes the product of an N-order tensor with N matrices
    # X is of tensor type, and so are the matrices
    # Res = T.tensor(np.copy(mxnet_backend.to_numpy(X)))
    Res = tl.tensor(X)
    for mode, matrix in enumerate(listoffactors):
        Res = tenalg.mode_dot(Res, matrix, mode)
    return Res
Example #4
def Tensor_matrixproduct(X, listoffactors):  # The parameters are tensors (a tensor and matrices)
    Res = tl.tensor(np.copy(mxnet_backend.to_numpy(X)))
    for mode, matrix in enumerate(listoffactors):
        Res = tenalg.mode_dot(Res, matrix, mode)
    return Res
Example #5
def main():
    # X = tnsr in the tutorial
    X = np.moveaxis(
        np.array([[[1, 3], [2, 4]], [[5, 7], [6, 8]], [[9, 11], [10, 12]]]), 0,
        2)

    A = np.linalg.svd(t_base.unfold(X, 0))[0]  # output A matches tutorial
    print("A = \n", A)

    B = np.linalg.svd(t_base.unfold(X, 1))[0]  # output B matches tutorial
    print("B = \n", B)

    C = np.linalg.svd(t_base.unfold(X, 2))[0]  # output C matches tutorial
    print("C = \n", C)

    g = t_alg.mode_dot(t_alg.mode_dot(t_alg.mode_dot(X, A.T, 0), B.T, 1), C.T,
                       2)
    print("g = \n", g)

    g2 = t_decomp.tucker(X, ranks=(2, 2, 3)).core  # note: newer tensorly versions spell this rank=
    print("g2 = \n", g2)

    return 0
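As a hedged sanity check (not part of the tutorial code, and reusing the t_alg/np aliases above): since A, B and C are square orthogonal matrices, the core g reconstructs X exactly.

def check_hosvd(X, A, B, C, g):
    # Full (untruncated) HOSVD: X = g x1 A x2 B x3 C
    X_rec = t_alg.multi_mode_dot(g, [A, B, C], modes=[0, 1, 2])
    return np.allclose(X, X_rec)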
Example #6
    def forward(self, input):
        '''
        Perform the forward propagation of the CPAC-Conv layer.
        input: images with shape (h, w, channels) or (h, w)
        '''

        self.last_input = input

        if self.image_channels == 1:
            h, w = input.shape
            reshape_input = image.extract_patches_2d(
                input, (self.filter_h, self.filter_w))
            p, _, _ = reshape_input.shape
            reshape_input = reshape_input.reshape(p, self.filter_h,
                                                  self.filter_w, 1)
        else:
            h, w, _ = input.shape
            reshape_input = image.extract_patches_2d(
                input, (self.filter_h, self.filter_w))
            p, _, _, _ = reshape_input.shape

        for i in range(self.rank):

            result = mode_dot(reshape_input, self.factors[3][:, i], mode=3)
            result = mode_dot(result, self.factors[2][:, i], mode=2)
            result = mode_dot(result, self.factors[1][:, i], mode=1)
            result = np.outer(result, self.factors[0][:, i])

            if i == 0:
                output = result
            else:
                output += result

        # p = (h - filter_h + 1) * (w - filter_w + 1) patches; the hard-coded
        # "- 2" assumes 3x3 filters
        output = output.reshape((h - 2, w - 2, self.num_filters))

        return output
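A hedged, self-contained check of the rank-1 trick used in forward() above (all shapes invented): summing the per-rank mode_dot/outer results equals contracting the patches with the dense kernel rebuilt from the CP factors.

import numpy as np
from tensorly.tenalg import mode_dot

rng = np.random.default_rng(0)
p, fh, fw, c, n_filters, rank = 10, 3, 3, 1, 8, 5
patches = rng.standard_normal((p, fh, fw, c))
factors = [rng.standard_normal((n_filters, rank)),   # like self.factors[0]
           rng.standard_normal((fh, rank)),          # like self.factors[1]
           rng.standard_normal((fw, rank)),          # like self.factors[2]
           rng.standard_normal((c, rank))]           # like self.factors[3]

out = 0
for i in range(rank):
    r = mode_dot(patches, factors[3][:, i], mode=3)
    r = mode_dot(r, factors[2][:, i], mode=2)
    r = mode_dot(r, factors[1][:, i], mode=1)
    out = out + np.outer(r, factors[0][:, i])

dense = np.einsum('nr,hr,wr,cr->nhwc', *factors)     # CP-reassembled kernel
assert np.allclose(out, np.einsum('phwc,nhwc->pn', patches, dense))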
Example #7
    def __getitem__(self, indices):
        if isinstance(indices, int):
            # Select one dimension of one mode
            mixing_factor, *factors = self.factors
            core = tenalg.mode_dot(self.core, mixing_factor[indices, :], 0)
            return core, factors

        elif isinstance(indices, slice):
            mixing_factor, *factors = self.factors
            factors = [mixing_factor[indices, :], *factors]
            return self.__class__(self.core, factors)

        else:
            # Index multiple dimensions
            modes = []
            factors = []
            factors_contract = []
            for i, (index, factor) in enumerate(zip(indices, self.factors)):
                if index is Ellipsis:
                    raise ValueError(
                        f'Ellipsis is not yet supported, yet got indices={indices}, indices[{i}]={index}.'
                    )
                if isinstance(index, int):
                    modes.append(i)
                    factors_contract.append(factor[index, :])
                else:
                    factors.append(factor[index, :])

            core = tenalg.multi_mode_dot(self.core,
                                         factors_contract,
                                         modes=modes)
            factors = factors + self.factors[i + 1:]

            if factors:
                return self.__class__(core, factors)

            # Fully contracted tensor
            return core
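A hedged standalone illustration of the integer branch above (shapes invented): indexing a Tucker tensor on its first mode contracts row i of the first factor into the core, and the result still reconstructs the matching slice of the dense tensor.

import numpy as np
from tensorly import tenalg

core = np.random.rand(2, 3, 4)
U0, U1, U2 = np.random.rand(5, 2), np.random.rand(6, 3), np.random.rand(7, 4)

i = 4
sub_core = tenalg.mode_dot(core, U0[i, :], 0)            # shape (3, 4)
dense = tenalg.multi_mode_dot(core, [U0, U1, U2])        # shape (5, 6, 7)
assert np.allclose(tenalg.multi_mode_dot(sub_core, [U1, U2], modes=[0, 1]),
                   dense[i])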
Example #8
def cp_mixrand(tensor, rank, **kwargs):
    """
    Performs mixing to decrease coherence amongst factors before applying randomized
    alternating-least squares to fit CP decomposition. Unmixes the factors before
    returning.
    """
    ndim = tensor.ndim

    # random orthogonal matrices for each tensor
    U = [np.linalg.qr(np.random.randn(s,s))[0] for s in tensor.shape]

    # mix tensor
    tensor_mix = tensor.copy()
    for mode, u in enumerate(U):
        tensor_mix = mode_dot(tensor_mix, u, mode)

    # call cp_rand as a subroutine
    factors_mix, info = cp_rand(tensor_mix, rank, **kwargs)

    # demix factors by inverting orthogonal matrices
    factors = [np.dot(u.T, fact) for u, fact in zip(U, factors_mix)]

    return factors, info
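Why the demixing on the last line works (a hedged, self-contained identity check; cp_rand itself is not needed): if every mode of the tensor is multiplied by an orthogonal matrix, multiplying each CP factor by the transpose undoes it.

import numpy as np

rng = np.random.default_rng(0)
shape, rank = (4, 5, 6), 3
factors = [rng.standard_normal((s, rank)) for s in shape]
tensor = np.einsum('ir,jr,kr->ijk', *factors)

# Mixing the tensor mode-wise == mixing each factor:
U = [np.linalg.qr(rng.standard_normal((s, s)))[0] for s in shape]
mixed = [u @ f for u, f in zip(U, factors)]
assert np.allclose(np.einsum('ir,jr,kr->ijk', *mixed),
                   np.einsum('ai,bj,ck,ijk->abc', *U, tensor))

# Demixing with the transposes recovers the original factors exactly:
demixed = [u.T @ f for u, f in zip(U, mixed)]
assert np.allclose(np.einsum('ir,jr,kr->ijk', *demixed), tensor)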
Example #9
def blbs(shape_params,
         expression_params,
         pose,
         v_template,
         shapedirs,
         posedirs,
         J_regressor,
         parents,
         lbs_weights,
         pose2rot=True,
         dtype=torch.float32):
    batch_size = max(shape_params.shape[0], expression_params.shape[0],
                     pose.shape[0])
    NV = v_template.size(1)
    device = shape_params.device
    # Add shape contribution
    #v_shaped = v_template + blend_shapes(betas, shapedirs)
    w_ex = mode_dot(shapedirs, shape_params, 2)
    w_ex = w_ex.transpose(1, 2).transpose(0, 1)
    v_shaped = w_ex @ (expression_params[..., None])
    verts = v_shaped.view(batch_size, -1, 3)
    #v_shaped = mode_dot(w_ex, expression_params,1)
    #verts = v_shaped.squeeze(1).view(-1, 3, batch_size).transpose(1,2).transpose(0,1)
    return verts, None
Example #10
    def _fit_2d(self, X, Y):
        """
        Compute the HOPLS for X and Y wrt the parameters R, Ln and Km for the special case mode_Y = 2.

        Parameters:
            X: tensorly Tensor, The target tensor of shape [i1, ... iN], N = 2.

            Y: tensorly Tensor, The target tensor of shape [j1, ... jM], M >= 3.

        Returns:
            G: Tensor, The core Tensor of the HOPLS for X, of shape (R, L2, ..., LN).

            P: List, The N-1 loadings of X.

            D: Tensor, The core Tensor of the HOPLS for Y, of shape (R, K2, ..., KN).

            Q: List, The N-1 loadings of Y.

            ts: Tensor, The latent vectors of the HOPLS, of shape (i1, R).
        """

        # Initialization
        Er, Fr = X, Y
        P, T, W, Q = [], [], [], []
        D = tl.zeros((self.R, self.R))
        G = []

        # Beginning of the algorithm
        # Gr, _ = tucker(Er, ranks=[1] + self.Ln)
        for r in range(self.R):
            if torch.norm(Er) > self.epsilon and torch.norm(Fr) > self.epsilon:
                # computing the covariance
                Cr = mode_dot(Er, Fr.t(), 0)

                # HOOI tucker decomposition of C
                Gr_C, latents = tucker(Cr, rank=[1] + self.Ln)

                # Getting P and Q loadings
                qr = latents[0]
                qr /= torch.norm(qr)
                # Pr = latents[1:]
                Pr = [a / torch.norm(a) for a in latents[1:]]
                P.append(Pr)
                tr = multi_mode_dot(Er, Pr, list(range(1, len(Pr) + 1)), transpose=True)
                # Gr_pi = torch.pinverse(matricize(Gr))
                # tr = torch.mm(matricize(tr), Gr_pi)
                GrC_pi = torch.pinverse(matricize(Gr_C))
                tr = torch.mm(matricize(tr), GrC_pi)
                tr /= torch.norm(tr)

                # recomposition of the core tensor of Y
                ur = torch.mm(Fr, qr)
                dr = torch.mm(ur.t(), tr)

                D[r, r] = dr
                Pkron = kronecker([Pr[self.N - n - 1] for n in range(self.N)])
                # P.append(torch.mm(matricize(Gr), Pkron.t()).t())
                # W.append(torch.mm(Pkron, Gr_pi))
                Q.append(qr)
                T.append(tr)
                Gd = tl.tucker_to_tensor([Er, [tr] + Pr], transpose_factors=True)
                Gd_pi = torch.pinverse(matricize(Gd))
                W.append(torch.mm(Pkron, Gd_pi))

                # Deflation
                # X_hat = torch.mm(torch.cat(T, dim=1), torch.cat(P, dim=1).t())
                # Er = X - np.reshape(X_hat, (Er.shape), order="F")
                Er = Er - tl.tucker_to_tensor([Gd, [tr] + Pr])
                Fr = Fr - dr * torch.mm(tr, qr.t())
            else:
                break

        Q = torch.cat(Q, dim=1)
        T = torch.cat(T, dim=1)
        # P = torch.cat(P, dim=1)
        W = torch.cat(W, dim=1)

        self.model = (P, Q, D, T, W)
        return self
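A hedged usage sketch (names hypothetical; the surrounding class is assumed to expose R, Ln, N and epsilon exactly as the method uses them):

# model = HOPLS(R=5, Ln=[4, 4], epsilon=1e-9)   # hypothetical constructor
# model._fit_2d(X, Y)    # X: (i1, i2, ..., iN) tensor, Y: (i1, j2) matrix
# P, Q, D, T, W = model.model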
Example #11
    def forward(self, x):
        # Do the tensor contraction one n-mode product at a time
        output = ta.mode_dot(x, self.factor1, mode=1)
        output = ta.mode_dot(output, self.factor2, mode=2)
        output = ta.mode_dot(output, self.factor3, mode=3)
        return output
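A hedged note: the three chained calls are equivalent to a single multi_mode_dot, shown here standalone with the same ta alias.

import numpy as np
from tensorly import tenalg as ta

x = np.random.rand(8, 4, 5, 6)
f1, f2, f3 = np.random.rand(4, 4), np.random.rand(5, 5), np.random.rand(6, 6)
chained = ta.mode_dot(ta.mode_dot(ta.mode_dot(x, f1, 1), f2, 2), f3, 3)
assert np.allclose(chained, ta.multi_mode_dot(x, [f1, f2, f3], modes=[1, 2, 3]))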
Example #12
def mode_product(a: torch.Tensor, b: torch.Tensor, axis: int):
    tmp = b.transpose(0, 1)
    return mode_dot(a, tmp, axis)
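A hedged note: tensorly's mode_dot also takes a transpose flag, so for real tensors the explicit transpose can be folded into the call:

# return mode_dot(a, b, axis, transpose=True)   # equivalent up to conjugation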
Example #13
    def backprop(self, d_L_d_out, learn_rate):
        '''
        Perform the backward propagation of the CPAC-Conv layer.
        '''

        if len(self.last_input.shape) == 3:
            input_w, input_h, input_c = self.last_input.shape
        else:
            input_w, input_h = self.last_input.shape
            input_c = 1

        h, w, c = d_L_d_out.shape
        d_L_d_currout = d_L_d_out
        d_L_d_preout = np.zeros((input_w, input_h, input_c))
        self.filters = tl.kruskal_to_tensor((self.factors))

        for curr_f in range(c):
            # loop through all filters (note: the index arithmetic below
            # assumes square images; the filter width is used in x below)
            curr_y = out_y = 0
            while curr_y + self.filter_h <= input_h:
                curr_x = out_x = 0
                while curr_x + self.filter_w <= input_h:
                    # loss gradient of the input to the convolution operation (conv1 in the case of this network)
                    d_L_d_preout[curr_y:curr_y + self.filter_h, curr_x:curr_x +
                                 self.filter_w, :] += d_L_d_currout[
                                     out_y, out_x,
                                     curr_f] * self.filters[curr_f]
                    curr_x += 1
                    out_x += 1
                curr_y += 1
                out_y += 1

        d_L_d_factor0 = np.zeros(self.factors[0].shape)  #(8,5)
        d_L_d_factor1 = np.zeros(self.factors[1].shape)  #(3,5)
        d_L_d_factor2 = np.zeros(self.factors[2].shape)  #(3,5)
        d_L_d_factor3 = np.zeros(self.factors[3].shape)  #(1,5)

        if self.image_channels == 1:
            last_input_reshape = image.extract_patches_2d(
                self.last_input, (self.filter_h, self.filter_w))
            p, _, _ = last_input_reshape.shape
            n = 1
            last_input_reshape = last_input_reshape.reshape(
                (p, self.filter_h, self.filter_w, n))

        else:
            last_input_reshape = image.extract_patches_2d(
                self.last_input, (self.filter_h, self.filter_w))
            p, _, _, n = last_input_reshape.shape

        d_L_d_out = d_L_d_out.reshape((p, self.num_filters))

        for i in range(self.rank):

            ##update K_r^N, d_L_d_factor0
            A_3 = mode_dot(last_input_reshape, self.factors[3][:, i], mode=3)
            A_2 = mode_dot(A_3, self.factors[2][:, i], mode=2)
            A_1 = mode_dot(A_2, self.factors[1][:, i], mode=1)
            A_1 = A_1.reshape((p, 1))
            I_1 = np.identity(self.num_filters)
            d_L_d_factor0[:, i] = np.sum((d_L_d_out * A_1), axis=0)

            ##update K_r^X, d_L_d_factor1
            d_L_d_factor1[:, i] = np.sum(
                np.kron(self.factors[0][:, i:i + 1], A_2) * d_L_d_out.reshape(
                    (p * self.num_filters, 1)),
                axis=0)

            ##update K_r^Y, d_L_d_factor2

            A_3_unfold = unfold(A_3, 2)
            I_2 = np.identity(p)
            B_2 = np.outer(self.factors[1][:, i], self.factors[0][:, i])
            B2I2 = np.kron(B_2, I_2)
            d_L_d_factor2[:, i] = np.sum(np.dot(A_3_unfold, B2I2) *
                                         d_L_d_out.reshape(
                                             (p * self.num_filters, 1)).T,
                                         axis=1)

            ##update K_r^S, d_L_d_factor3
            U_unfold = unfold(last_input_reshape, 3)
            I_3 = np.identity(self.filter_h * p)

            d_L_d_factor3[:, i] = np.sum(np.dot(
                U_unfold,
                np.dot(np.kron(self.factors[2][:, i:i + 1], I_3), B2I2)) *
                                         d_L_d_out.reshape(
                                             (p * self.num_filters, 1)).T,
                                         axis=1)

        ## Adam-style update (no bias correction)
        d_factors = [
            d_L_d_factor0, d_L_d_factor1, d_L_d_factor2, d_L_d_factor3
        ]
        # Update filters
        for i in range(len(self.factors)):
            self.v[i] = self.beta1 * self.v[i] + (1 -
                                                  self.beta1) * d_factors[i]
            self.s[i] = self.beta2 * self.s[i] + (1 -
                                                  self.beta2) * d_factors[i]**2
            self.factors[i] -= learn_rate * self.v[i] / np.sqrt(self.s[i] +
                                                                1e-7)

        return d_L_d_preout
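For debugging layers like this, a hedged finite-difference gradient-check sketch (layer construction and loss_fn are hypothetical; compare the result to the corresponding entry of d_L_d_factor0):

# eps = 1e-6
# layer.factors[0][0, 0] += eps
# loss_plus = loss_fn(layer.forward(x))
# layer.factors[0][0, 0] -= 2 * eps
# loss_minus = loss_fn(layer.forward(x))
# layer.factors[0][0, 0] += eps                  # restore
# approx_grad = (loss_plus - loss_minus) / (2 * eps)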
Example #14
    def __getitem__(self, indices):
        counter = 0
        ndim = self.core.ndim
        new_ndim = 0
        new_factors = []
        out_shape = []
        new_modes = []

        core = self.core

        for (index, shape) in zip(indices, self.tensorized_shape):
            if isinstance(shape, int):
                if index is Ellipsis:
                    raise ValueError(
                        f'Ellipsis is not yet supported, yet got indices={indices}, index={index}.'
                    )
                factor = self.factors[counter]
                if isinstance(index, int):
                    core = tenalg.mode_dot(core, factor[index, :], new_ndim)
                else:
                    contracted = factor[index, :]
                    new_factors.append(contracted)
                    if contracted.shape[0] > 1:
                        out_shape.append(shape)
                        new_modes.append(new_ndim)
                        new_ndim += 1

                counter += 1

            else:  # Tensorized dimension
                n_tensorized_modes = len(shape)

                if index == slice(None) or index == ():
                    new_factors.extend(self.factors[counter:counter +
                                                    n_tensorized_modes])
                    out_shape.append(shape)
                    new_modes.extend(
                        [new_ndim + i for i in range(n_tensorized_modes)])
                    new_ndim += n_tensorized_modes

                else:
                    if isinstance(index, slice):
                        # Since we've already filtered out :, this is a partial slice
                        # Convert into list
                        max_index = math.prod(shape)
                        index = list(range(*index.indices(max_index)))

                    index = np.unravel_index(index, shape)

                    contraction_factors = [
                        f[idx, :] for idx, f in zip(
                            index, self.factors[counter:counter +
                                                n_tensorized_modes])
                    ]
                    if contraction_factors[0].ndim > 1:
                        shared_symbol = einsum_symbols[core.ndim + 1]
                    else:
                        shared_symbol = ''

                    core_symbols = ''.join(einsum_symbols[:core.ndim])
                    factors_symbols = ','.join([
                        f'{shared_symbol}{s}'
                        for s in core_symbols[new_ndim:new_ndim +
                                              n_tensorized_modes]
                    ])
                    res_symbol = core_symbols[:
                                              new_ndim] + shared_symbol + core_symbols[
                                                  new_ndim +
                                                  n_tensorized_modes:]

                    if res_symbol:
                        eq = core_symbols + ',' + factors_symbols + '->' + res_symbol
                    else:
                        eq = core_symbols + ',' + factors_symbols

                    core = torch.einsum(eq, core, *contraction_factors)

                    if contraction_factors[0].ndim > 1:
                        new_ndim += 1

                counter += n_tensorized_modes

        if counter <= ndim:
            out_shape.extend(list(core.shape[new_ndim:]))
            new_modes.extend(list(range(new_ndim, core.ndim)))
            new_factors.extend(self.factors[counter:])

        # Only here until our Tucker class handles partial-Tucker too
        if len(new_modes) != core.ndim:
            core = tenalg.multi_mode_dot(core, new_factors, new_modes)
            new_factors = []

        if new_factors:
            # return core, new_factors, out_shape, new_modes
            return self.__class__(core,
                                  new_factors,
                                  tensorized_shape=out_shape)

        return core
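The key trick in the tensorized branch above is np.unravel_index: a flat index into a dimension of size prod(shape) becomes one index per tensorized mode. A standalone illustration:

import numpy as np

shape = (4, 5)                                 # one tensorized dimension, size 20
multi = np.unravel_index(13, shape)            # 13 -> (13 // 5, 13 % 5)
assert tuple(multi) == (2, 3)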
if __name__ == "__main__":
    main()
"""

#------------MAIN------------------

T, D = LoadData(P, I, imgVecSize)  #baza podataka

start_train = time.time()
#S, U_list = tucker(T)
#H = U_list[0]; F = U_list[1]; G = U_list[2]
S, H, F, G = HOSVD(T)
end_train = time.time()

B = mode_dot(S, G, 2)
Qs, Rs = QR(B)
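# Hedged note: precomputing a QR factorization of each slice B[:, :, k] lets
# the per-image least-squares solve in TestImg below be replaced by the cheap
# triangular solve shown in its commented-out line.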


def TestImg(z):
    predict = []
    norms = []
    Predict_pers = -2
    Predict_expr = -2
    Z = np.dot(F.T, z)
    for k in range(B.shape[2]):
        alfa = np.linalg.lstsq(B[:, :, k].T, Z, rcond=None)
        #alfa = np.linalg.lstsq(Rs[k], np.dot(Qs[k].T,Z))
        for i in range(H.shape[0]):
            norm = np.linalg.norm(alfa[0] - H[i, :])
            #print(str(i+1)+':  '+str(norm))