Example #1
    def test_PCA_whitening_clean(self):
        start = time.time()
        x_white, white, dewhite = ica.pca_whiten(self.clean_data, self.NCOMP)
        end = time.time()
        print('\ttime: {:.2f} seconds'.format(end - start))
        # Check output dimensions
        self.assertEqual(x_white.shape, (self.NCOMP, self.NVOX))
        self.assertEqual(white.shape, (self.NCOMP, self.NSUB))
        self.assertEqual(dewhite.shape, (self.NSUB, self.NCOMP))

        # Check variance is 1
        var = x_white.var(axis=1)
        self.assertLess(np.linalg.norm(var - 1.0), 1e-2)

        # Test whether the covariance of x_white is the identity
        cov = auto_cov(x_white)
        self.assertLess(
            np.linalg.norm(cov - np.eye(self.NCOMP)) / self.NCOMP / self.NCOMP,
            1e-6)
        # Test whether white and dewhite are pseudo-inverses of each other
        eye = np.dot(white, dewhite)
        self.assertLess(
            np.linalg.norm(eye - np.eye(self.NCOMP)) / self.NCOMP / self.NCOMP,
            1e-4)
        eye = np.dot(dewhite, white)
        self.assertLess(
            np.linalg.norm(eye - np.eye(self.NSUB)) / self.NSUB / self.NSUB,
            1e-4)
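
The tests above call an auto_cov helper that is not shown in these snippets. A minimal sketch of what it presumably computes (the sample covariance of the rows of x_white), assuming numpy is imported as np:

import numpy as np

def auto_cov(x):
    # Presumed helper: sample covariance of the rows of x (components x samples)
    x = x - x.mean(axis=1, keepdims=True)
    return np.dot(x, x.T) / (x.shape[1] - 1)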
Example #2
def whiten(X, n_components):
    N, R, K = X.shape
    X_white = np.zeros((n_components, R, K))
    wht = np.zeros((n_components, N, K))
    de_wht = np.zeros((N, n_components, K))
    for k in range(K):
        # Whiten dataset k and keep its whitening/dewhitening matrices
        X_white[:, :, k], wht[:, :, k], de_wht[:, :, k] = pca_whiten(X[:, :, k], n_components)

    return X_white, wht, de_wht
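
A quick usage sketch for whiten, assuming numpy is imported as np and pca_whiten is in scope; the dimensions and random data are illustrative only:

N, R, K = 10, 500, 4              # hypothetical: 10 channels, 500 samples, 4 datasets
X = np.random.rand(N, R, K)
X_white, wht, de_wht = whiten(X, n_components=5)
# One whitened dataset and one whitening/dewhitening pair per k
assert X_white.shape == (5, R, K)
assert wht.shape == (5, N, K)
assert de_wht.shape == (N, 5, K)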
Example #3
from scipy.io import loadmat, savemat

def pca_preprocess(name, comp, subjects, matrices, verbose=True):
    assert len(subjects) == len(matrices)
    K = len(subjects)
    for k in range(K):
        matrix = loadmat(matrices[k])['A']
        subject = np.dot(matrix, loadmat(subjects[k])['S'])
        X_white, wht, de_wht = pca_whiten(subject, comp)
        savemat(name + index(k + 1) + ".mat",
                {'X_white': X_white, 'wht': wht, 'de_wht': de_wht})
        if verbose:
            print("subject " + str(k + 1) + " done")
Example #4
    def test_PCA_whitening_noisy(self):
        start = time.time()
        x_white, white, dewhite = ica.pca_whiten(self.noisy_data, self.NCOMP)
        end = time.time()
        print('\ttime: {:.2f} seconds'.format(end - start))
        self.assertEqual(x_white.shape, (self.NCOMP, self.NVOX))
        self.assertEqual(white.shape, (self.NCOMP, self.NSUB))
        self.assertEqual(dewhite.shape, (self.NSUB, self.NCOMP))

        cov = auto_cov(x_white)
        self.assertLess(np.linalg.norm(cov - np.eye(self.NCOMP)) / self.NCOMP / self.NCOMP, 1e-6)

        # Test whether white and dewhite are pseudo-inverses of each other
        eye = np.dot(white, dewhite)
        self.assertLess(np.linalg.norm(eye - np.eye(self.NCOMP)) / self.NCOMP / self.NCOMP, 1e-4)
        eye = np.dot(dewhite, white)
        self.assertLess(np.linalg.norm(eye - np.eye(self.NSUB)) / self.NSUB / self.NSUB, 1e-4)
Example #5
def iva_l(X, W_init=[], term_threshold=1e-6, term_crit='ChangeInCost',
          max_iter=2048, A=[], verbose=False, n_components=0):
    '''
    IVA_L: Independent Vector Analysis using a multivariate Laplacian
        distribution.

    Inputs:
    -------
    X : 3-D data matrix, shape=(N,T,K). Note that X HAS to be 3-D, even if
        there is only one subject.

    W_init : array, shape=(N,N,K), optional
        Initial guess for W. Defaults to np.random.rand(N,N,K), where N is
        the number of rows of X and K is the number of datasets
        (i.e. X.shape = (N,T,K) and W.shape = (N,N,K)).

    term_threshold : float, optional
        How low the termination criterion has to be for the algorithm to
        stop. Defaults to 1e-6.

    term_crit : string, optional
        Termination criterion. Only two options: 'ChangeInCost' and
        'ChangeInW'. Defaults to 'ChangeInCost'.

    max_iter : int, optional
        The maximum number of iterations the algorithm is allowed to run.
        Defaults to 2048.

    verbose : bool, optional
        Print iteration information? Defaults to False.

    n_components : int, optional
        If greater than 0, each dataset is PCA-whitened down to
        n_components before unmixing. Defaults to 0 (no whitening).

    Outputs:
    --------
    W : array, shape=(N,N,K)
        The unmixing matrix W.
    cost : list
        The cost at each iteration.
    wht, de_wht : arrays, only returned when n_components > 0
        The whitening and dewhitening matrices from pca_whiten.
    '''
    
    try:
        N, T, K = X.shape
    except ValueError:
        raise ValueError('''X needs to be 3-D, or in (N,T,K) form.
                             current matrix is %s''' % str(X.shape))

    cost = [np.nan for x in range(max_iter)]
    if n_components > 0:
        # Optionally PCA-whiten each dataset before unmixing
        wht = np.zeros((n_components, N, K))
        de_wht = np.zeros((N, n_components, K))
        X_white = np.zeros((n_components, T, K))
        for k in range(K):
            X_white[:, :, k], wht[:, :, k], de_wht[:, :, k] = pca_whiten(
                X[:, :, k], n_components, verbose=verbose)
        X = X_white
    Y = X.copy()
    N,T,K = X.shape
    
    if len(W_init) == 0:
        W = np.random.rand(N, N, K)
    else:
        if W_init.shape == (N, N, K):
            W = W_init
        else:
            raise ValueError('''W has to have dimension %i x %i for
                    each of the %i sites, in form (rows, columns, subjects)
                    ''' % (N, N, K))
    
    if (term_crit != 'ChangeInW') and (term_crit != 'ChangeInCost') :
        raise ValueError ('''term_crit has to be either 
                          'ChangeInW' or 'ChangeInCost' ''')
    
    back_num = 10.0      # initial step-size divisor for the gradient step
    backtrack = False
    ## Main Loop
    for iteration in range(max_iter) :
        term_criterion = 0
        
        ## Initial approximation to true source vectors
        for k in range(K) :
            Y[:,:,k] = np.dot(W[:,:,k], X[:,:,k])
            # Y[:,:,k] += np.random.laplace(size = (N,T)) *2
        
        ## Initializing values for the iteration summing over datasets, left with N x T
        ## dataset.
        sqrtYtY    = np.sqrt(np.sum(Y*Y,2))
        
        sqrtYtYInv = 1 / sqrtYtY
        
        cost[iteration] = _compute_cost(W, sqrtYtY)
        
        if iteration > 1:
            if backtrack:
                # Stop backtracking once the cost drops below its running minimum
                if cost[iteration] < min(cost[0:iteration]):
                    backtrack = False
            else:
                # Start backtracking if the cost rose above its running minimum
                if cost[iteration] > min(cost[0:iteration]):
                    backtrack = True

        if not backtrack:
            dW = (1 / back_num) * _get_dW(W, Y, sqrtYtYInv)
        else:
            # Undo the previous step and retry with half the step size
            W -= dW
            dW *= 1.0 / 2.0
            back_num += 1

        W_old = W.copy()
        W += dW
        
        ## Check termination Criterion
        if term_crit == 'ChangeInW' :
            for k in range(K) :
                tmp_W = W_old[:,:,k] - W[:,:,k]
                term_criterion = max(term_criterion, np.linalg.norm(tmp_W[:,:], ord=2))
        
        elif term_crit == 'ChangeInCost':
            if iteration == 0:
                term_criterion = 1
            else:
                term_criterion = (abs(cost[iteration - 1] - cost[iteration])
                                  / abs(cost[iteration]))
        
        ## Check termination condition
        if term_criterion < term_threshold or iteration == max_iter - 1:
            break
        elif np.isinf(cost[iteration]):
            if verbose:
                print("W blew up, restarting with new initial value")
            for k in range(K):
                W[:, :, k] = np.identity(N) + 0.1 * np.random.rand(N, N)
        
        ## Display iteration information
        if verbose:
            print("Step: %i \t W change: %f \t Cost %f" % (iteration, term_criterion, cost[iteration]))
            
            ## End iteration
    
    if iteration == max_iter - 1:
        print('''Algorithm may not have converged, reached max
               number of iterations ''')
    print "a"
    ## Finish display
    if verbose:
        print("Algorithm converged, end results are: ")
        print(" Step: %i \n W change: %f \n Cost %f \n\n" % (iteration, term_criterion, cost[iteration]))
    if n_components > 0 :
        return W, wht, de_wht, cost
    else :
        return W, cost
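
A minimal calling sketch for this iva_l, assuming numpy is imported as np and that pca_whiten, _compute_cost, and _get_dW are available in the same module; the toy data below only illustrates the calling convention and output shapes:

N, T, K = 4, 1000, 3                      # hypothetical sources, samples, subjects
X = np.random.laplace(size=(N, T, K))

# Random initial W, no whitening: returns the unmixing matrix and cost trace
W, cost = iva_l(X)
assert W.shape == (N, N, K)

# With PCA whitening, the whitening/dewhitening matrices are returned as well
W2, wht, de_wht, cost2 = iva_l(X, n_components=3)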
Example #6
def iva_l(X, W, term_threshold=1e-6, term_crit='ChangeInW',
          max_iter=10000, A=[], verbose=False, n_components=0):

    N, T, K = X.shape
    cost = [np.nan for x in range(max_iter)]
    if n_components > 0:
        # PCA-whiten each dataset before unmixing
        wht = np.zeros((n_components, N, K))
        de_wht = np.zeros((N, n_components, K))
        X_white = np.zeros((n_components, T, K))
        for k in range(K):
            X_white[:, :, k], wht[:, :, k], de_wht[:, :, k] = pca_whiten(
                X[:, :, k], n_components, verbose=verbose)
        X = X_white
    Y = X.copy()
    N, T, K = X.shape
    
    s_cons = 1e-4       # sufficient-decrease constant for the backtracking line search
    alpha = 0.1
    dW_norm = 1.0
    term_criterion = np.inf
    for it in range(max_iter) :
        
        ## Initial approximation to true source vectors
        for k in range(K) :
            Y[:,:,k] = np.dot(W[:,:,k], X[:,:,k])
        
        sqrtYtY    = np.sqrt(np.sum(Y*Y,2))
        cost[it] = compute_cost(W, sqrtYtY)
        
        if it > 0 :
            back_num = 0
            while cost[it] > cost[it-1] - s_cons * alpha * old_norm :
                alpha *= 0.5
                W = W_old.copy() + alpha * dW
                for k in range(K) :
                    Y[:,:,k] = np.dot(W[:,:,k], X[:,:,k])
                sqrtYtY = np.sqrt(np.sum(Y*Y,2))
                cost[it] = compute_cost(W, sqrtYtY)
                
                if verbose:
                    print(" Backtracking: %i \t Alpha : %.10f \t  Cost: %f" % (back_num, alpha, cost[it]))
                back_num += 1
                
        sqrtYtYInv = 1 / sqrtYtY
        old_norm = dW_norm
        
        dW = get_dW(W, Y, sqrtYtYInv)
        dW_norm = grad_norm(dW)
        W_old = W.copy()
        
        alpha = get_alpha(it, alpha, old_norm, dW_norm)
        W += alpha * dW
        
        ## Check termination Criterion
        if term_crit == 'ChangeInW' :
            term_criterion = 0
            for k in range(K) :
                tmp_W = W_old[:,:,k] - W[:,:,k]
                term_criterion = max(term_criterion, np.linalg.norm(tmp_W[:,:], ord=2))
        
        elif term_crit == 'ChangeInCost' :
            if it == 0 :
                term_criterion = 1.0
            else :
                term_criterion = (abs(cost[it-1]-cost[it])
                                 / abs(cost[it]))
        
        ## Check termination condition
        if term_criterion < term_threshold or it == max_iter - 1:
            break
        ## Display iteration information
        if verbose:
            print("Step: %i \t W change: %f \t Cost: %f \t Alpha: %f" % (it, term_criterion, cost[it], alpha))
            ## End iteration
    
    ## Finish display
    if verbose:
        print("Algorithm converged, end results are: ")
        print(" Step: %i \n W change: %f \n Cost %f \n\n" % (it, term_criterion, cost[it]))
    if n_components > 0:
        return W, wht, de_wht, cost[:it + 1]
    else:
        return W, cost[:it + 1]
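
This variant takes the initial W explicitly and backtracks until the cost satisfies a sufficient-decrease (Armijo-style) condition. A minimal calling sketch, assuming numpy is imported as np and that compute_cost, get_dW, grad_norm, and get_alpha are defined in the same module:

N, T, K = 4, 1000, 3                      # hypothetical sources, samples, subjects
X = np.random.laplace(size=(N, T, K))
W0 = np.random.rand(N, N, K)              # explicit initial unmixing matrices
W, cost = iva_l(X, W0, term_crit='ChangeInCost', verbose=True)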