Code Example #1
File: mpca.py  Project: ZelinW/pykale
    def inverse_transform(self, x):
        """Reconstruct projected data to the original shape and add the estimated mean back

        Args:
            x (array-like tensor): Data to be reconstructed, shape (n_samples, P_1, P_2, ..., P_N) if
                self.return_vector == False, where P_1, P_2, ..., P_N are the reduced dimensions of the
                corresponding modes (1, 2, ..., N), respectively. If self.return_vector == True, shape
                (n_samples, self.n_components) or (n_samples, P_1 * P_2 * ... * P_N).

        Returns:
            array-like tensor:
                Reconstructed tensor in original shape, shape (n_samples, I_1, I_2, ..., I_N)
        """
        # reshape x to a tensor of shape (n_samples,) + self.shape_out if x has been unfolded
        if x.ndim <= 2:
            if x.ndim == 1:
                # reshape x to a 2D matrix (1, n_components) if x in shape (n_components,)
                x = x.reshape((1, -1))
            n_samples = x.shape[0]
            n_features = x.shape[1]
            if n_features <= np.prod(self.shape_out):
                x_ = np.zeros((n_samples, np.prod(self.shape_out)))
                x_[:, self.idx_order[:n_features]] = x[:]
            else:
                msg = "Feature dimension exceeds the shape upper limit."
                logging.error(msg)
                raise ValueError(msg)

            x = fold(x_, mode=0, shape=((n_samples, ) + self.shape_out))

        x_rec = multi_mode_dot(x,
                               self.proj_mats,
                               modes=list(range(1, self.n_dims)),
                               transpose=True)

        x_rec = x_rec + self.mean_

        return x_rec
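
A minimal usage sketch for inverse_transform above, assuming the surrounding MPCA class follows the scikit-learn fit/transform convention as in pykale; the import path, constructor defaults, and data shape are illustrative assumptions.

import numpy as np
from kale.embed.mpca import MPCA  # assumed path, based on the file name above

x = np.random.random((40, 20, 25, 20))   # 40 samples, each of shape (20, 25, 20)
mpca = MPCA()
x_proj = mpca.fit(x).transform(x)        # project onto the reduced modes
x_rec = mpca.inverse_transform(x_proj)   # reconstruct and add the mean back
print(x_rec.shape)                       # (40, 20, 25, 20)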
Code Example #2
def select_top_weight(weights, select_ratio: float = 0.05):
    """Select top weights in magnitude, and the rest of weights will be zeros

    Args:
        weights (array-like): model weights, can be a vector or a higher order tensor
        select_ratio (float, optional): ratio of top weights to be selected. Defaults to 0.05.

    Returns:
        array-like: top weights, in the same shape as the input model weights
    """
    if not isinstance(weights, np.ndarray):
        weights = np.array(weights)
    orig_shape = weights.shape

    if len(orig_shape) > 1:
        weights = unfold(weights, mode=0)[0]
    n_top_weights = int(weights.size * select_ratio)
    top_weight_idx = np.argsort(-np.abs(weights))[:n_top_weights]
    top_weights = np.zeros(weights.size)
    top_weights[top_weight_idx] = weights[top_weight_idx]
    if len(orig_shape) > 1:
        top_weights = fold(top_weights, mode=0, shape=orig_shape)

    return top_weights
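
A minimal usage sketch for select_top_weight, with synthetic weights; numpy and tensorly.base's unfold/fold are assumed to be imported as the function requires. Note that unfold(weights, mode=0)[0] keeps only the first row of the mode-0 unfolding, so higher-order inputs are expected to carry a leading dimension of 1.

import numpy as np

weights = np.random.randn(1, 10, 12)   # e.g. a coefficient map with a leading dim of 1
top = select_top_weight(weights, select_ratio=0.05)
print(top.shape)                       # (1, 10, 12), same shape as the input
print(np.count_nonzero(top))           # 6 of the 120 entries survive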
Code Example #3
        Y_pred_linear = Y_scaler.inverse_transform(Y_pred_linear)
        Y_pred_MT = Y_pred_MT + Y_pred_linear
    
    ####################################### Evaluation ########################
                                        
    NPMs_MT = normative_prob_map(Y_test, Y_pred_MT, Y_pred_cov_MT, s_n2_MT)
    NPMs_MT[~np.isfinite(NPMs_MT)] = 0
    EVD_params = extreme_value_prob_fit(NPMs_MT[:train_num,:], 0.01)
    abnormal_probs_MT = extreme_value_prob(EVD_params, NPMs_MT[train_num:,:], 0.01)
    auc_all[r,] = roc_auc_score(labels, abnormal_probs_MT)   
    auc_SCHZ[r] = roc_auc_score(labels[(diagnosis_labels==0) | (diagnosis_labels==1),], abnormal_probs_MT[(diagnosis_labels==0) | (diagnosis_labels==1),])
    auc_ADHD[r] = roc_auc_score(labels[(diagnosis_labels==0) | (diagnosis_labels==2),], abnormal_probs_MT[(diagnosis_labels==0) | (diagnosis_labels==2),])
    auc_BIPL[r] = roc_auc_score(labels[(diagnosis_labels==0) | (diagnosis_labels==3),], abnormal_probs_MT[(diagnosis_labels==0) | (diagnosis_labels==3),])    
         
    print('Results: AUC = %f' % auc_all[r,])
    
    ################################## Saving Results ######################### 
    
    ex_img = nib.load(main_dir + 'derivatives/task/sub-10159/taskswitch.feat/example_func.nii.gz')
    NPMs_MT = fold(NPMs_MT, mode=0, shape = [NPMs_MT.shape[0],Y_shape[0],Y_shape[1],Y_shape[2]])
    original_image_size[0] = NPMs_MT.shape[0]
    temp = np.zeros(original_image_size, dtype=np.float32)
    temp[:,x_from:x_to,y_from:y_to,z_from:z_to] = NPMs_MT
    image = nib.Nifti1Image(np.transpose(temp, [1,2,3,0]), ex_img.affine, ex_img.header)
    nib.save(image, save_path + method + '_' + str(b) + '_' + str(nb) + '_NPMs_' + str(r) + '.nii.gz')
    
    savemat(save_path + method + '_' + str(b) + '_' + str(nb) + '_results.mat',
            {'elapsed_time_opt': elapsed_time_opt, 'elapsed_time_est': elapsed_time_est, 
             'auc_all':auc_all, 'auc_SCHZ':auc_SCHZ, 'auc_ADHD':auc_ADHD, 'auc_BIPL':auc_BIPL,
             'hyperparams':hyperparams_opt})
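
The per-diagnosis AUCs above all follow the same boolean-masking pattern: score one patient group against the controls (diagnosis 0) by masking both the labels and the abnormality scores. A self-contained sketch with synthetic data (all names and values here are illustrative, not from the study):

import numpy as np
from sklearn.metrics import roc_auc_score

rng = np.random.default_rng(0)
diagnosis_labels = rng.integers(0, 4, size=200)   # 0 = control, 1-3 = patient groups
labels = (diagnosis_labels != 0).astype(int)      # binary healthy-vs-patient labels
scores = labels * 0.5 + rng.random(200)           # synthetic abnormality probabilities

# AUC for controls vs. patient group 1, via a boolean mask on both arrays
mask = (diagnosis_labels == 0) | (diagnosis_labels == 1)
print(roc_auc_score(labels[mask], scores[mask]))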
Code Example #4
# -*- coding: utf-8 -*-
"""
Basic tensor operations
=======================

Example on how to use :mod:`tensorly.base` to perform basic tensor operations.

"""

import matplotlib.pyplot as plt
from tensorly.base import unfold, fold
import numpy as np

###########################################################################
# A tensor is simply a numpy array
tensor = np.arange(24).reshape((3, 4, 2))
print('* original tensor:\n{}'.format(tensor))

###########################################################################
# Unfolding a tensor is easy
for mode in range(tensor.ndim):
    print('* mode-{} unfolding:\n{}'.format(mode, unfold(tensor, mode)))

###########################################################################
# Re-folding the tensor is just as easy:
for mode in range(tensor.ndim):
    unfolding = unfold(tensor, mode)
    folded = fold(unfolding, mode, tensor.shape)
    print(np.all(folded == tensor))
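
For reference, tensorly's mode-n unfolding amounts to moving mode n to the front and flattening the remaining modes; a quick check of that equivalence, appended in the same style as the script above:

###########################################################################
# A mode-n unfolding moves mode n to the front and flattens the rest
for mode in range(tensor.ndim):
    manual = np.moveaxis(tensor, mode, 0).reshape((tensor.shape[mode], -1))
    print(np.array_equal(unfold(tensor, mode), manual))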
Code Example #5
def robust_pca(X,
               mask=None,
               tol=10e-7,
               reg_E=1,
               reg_J=1,
               mu_init=10e-5,
               mu_max=10e9,
               learning_rate=1.1,
               n_iter_max=100,
               verbose=1):
    """Robust Tensor PCA via ALM with support for missing values

        Decomposes a tensor `X` into the sum of a low-rank component `D`
        and a sparse component `E`.

    Parameters
    ----------
    X : ndarray
        tensor data of shape (n_samples, N1, ..., NS)
    mask : ndarray
        array with the same shape as `X`;
        zero where the values are missing and one everywhere else
    tol : float
        convergence tolerance
    reg_E : float, optional, default is 1
        regularisation on the sparse part `E`
    reg_J : float, optional, default is 1
        regularisation on the low rank part `D`
    mu_init : float, optional, default is 10e-5
        initial value for mu
    mu_max : float, optional, default is 10e9
        maximal value for mu
    learning_rate : float, optional, default is 1.1
        factor by which `mu` is multiplied at each iteration
    n_iter_max : int, optional, default is 100
        maximum number of iterations
    verbose : int, default is 1
        level of verbosity

    Returns
    -------
    (D, E)
        Robust decomposition of `X`

    D : `X`-like array
        low-rank part
    E : `X`-like array
        sparse error part

    Notes
    -----
    The problem we solve is, for an input tensor :math:`\\tilde X`:

    .. math::
       :nowrap:

        \\begin{equation*}
        \\begin{aligned}
           & \\min_{\\{J_i\\}, \\tilde D, \\tilde E}
           & & \\sum_{i=1}^N  \\text{reg}_J \\|J_i\\|_* + \\text{reg}_E \\|\\tilde E\\|_1 \\\\
           & \\text{subject to}
           & & \\tilde X  = \\tilde D + \\tilde E \\\\
           & & & D_{[i]} =  J_i,  \\text{ for each } i \\in \\{1, 2, \\cdots, N\\}\\\\
        \\end{aligned}
        \\end{equation*}

    """
    if mask is None:
        mask = 1

    # Initialise the decompositions
    D = T.zeros_like(X, **T.context(X))  # low rank part
    E = T.zeros_like(X, **T.context(X))  # sparse part
    # Lagrangian multipliers for the X = D + E constraint
    L_x = T.zeros_like(X, **T.context(X))
    # low-rank approximations of the mode unfoldings of D
    J = [T.zeros_like(X, **T.context(X)) for _ in range(T.ndim(X))]
    # Lagrangian multipliers for the D = J_i constraints
    L = [T.zeros_like(X, **T.context(X)) for _ in range(T.ndim(X))]

    # Norm of the reconstructions at each iteration
    rec_X = []
    rec_D = []

    mu = mu_init

    for iteration in range(n_iter_max):

        for i in range(T.ndim(X)):
            J[i] = fold(
                svd_thresholding(
                    unfold(D, i) + unfold(L[i], i) / mu, reg_J / mu), i,
                X.shape)

        D = L_x / mu + X - E
        for i in range(T.ndim(X)):
            D += J[i] - L[i] / mu
        D /= (T.ndim(X) + 1)

        E = soft_thresholding(X - D + L_x / mu, mask * reg_E / mu)

        # Update the lagrangian multipliers
        for i in range(T.ndim(X)):
            L[i] += mu * (D - J[i])

        L_x += mu * (X - D - E)

        mu = min(mu * learning_rate, mu_max)

        # Evolution of the reconstruction errors
        rec_X.append(T.norm(X - D - E, 2))
        rec_D.append(max([T.norm(low_rank - D, 2) for low_rank in J]))

        # Convergence check
        if iteration > 1:
            if max(rec_X[-1], rec_D[-1]) <= tol:
                if verbose:
                    print('\nConverged in {} iterations'.format(iteration))
                break
            elif verbose:
                print("[INFO] iter:", iteration, " error:",
                      max(rec_X[-1], rec_D[-1]).item())

    return D, E
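
A minimal usage sketch for robust_pca with tensorly's numpy backend; the import paths for the backend and the thresholding helpers, as well as the synthetic data, are assumptions.

import numpy as np
import tensorly as T
from tensorly.base import unfold, fold
from tensorly.tenalg.proximal import svd_thresholding, soft_thresholding

rng = np.random.default_rng(0)
low_rank = T.tensor(rng.random((10, 5, 1)) * rng.random((10, 1, 8)))  # low-rank part
sparse = T.tensor(10.0 * (rng.random((10, 5, 8)) < 0.05))             # sparse outliers
X = low_rank + sparse

D, E = robust_pca(X, n_iter_max=50, verbose=0)
print(T.norm(X - D - E, 2))   # residual should be small at convergence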