from numpy import matrix, square, cov
from numpy.linalg import inv, eigh
from numpy.random import rand as rand_array


def em_pca(data_matrix, n_pcs, tolerance=.000001):
    [n_vars, n_obs] = data_matrix.shape
    
    # Start with an initial guess for the loadings matrix (or eigenvectors)
    loadings = matrix(rand_array(n_vars, n_pcs))
    
    prev_err = float("inf")
    while(True):
        # E-STEP - compute new scores based on current loadings
        # a.k.a. compute projected data based on current eigenvectors
        proj_data = inv(loadings.transpose() * loadings) * loadings.transpose() * data_matrix
    
        # M-STEP - compute new loadings based on current scores
        # a.k.a. compute best eigenvectors based on current projected data
        loadings = data_matrix * proj_data.transpose() * inv(proj_data * proj_data.transpose())
        
        # Compute the squared error
        error = square(data_matrix - loadings * proj_data).sum()
        
        # If the error is no longer decreasing, then we have converged
        error_ratio = error / prev_err
        print(error_ratio)
        if (error_ratio > (1 - tolerance)):
            break
        prev_err = error

    # Now that the EM algorithm has converged, the loadings span the correct lower-
    # dimensional space, and the data is projected into it.  We now use the regular
    # PCA on this lower-dimensional matrix to finish it off and decorrelate the
    # dimensions
    proj_matrix = inv(loadings.transpose() * loadings) * loadings.transpose()
    proj_data =  proj_matrix * data_matrix
    

    # "Regular PCA" on the low-dimensional scores: eigendecompose their
    # covariance and sort the eigenvectors by decreasing eigenvalue so the
    # rotation decorrelates the dimensions.
    new_cov = cov(proj_data)
    eigenvalues, eigenvectors = eigh(new_cov)
    order = eigenvalues.argsort()[::-1]
    rotation = matrix(eigenvectors[:, order])

    # Rotate the scores into the decorrelated basis, and fold the rotation into
    # the projection so the returned components map the original data directly
    # to the final scores.
    new_projected_data = rotation.transpose() * proj_data
    final_pcs = (rotation.transpose() * proj_matrix).transpose()

    return final_pcs, new_projected_data
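

# A minimal, hypothetical usage sketch for the em_pca() function above.  The
# data layout (variables in rows, observations in columns) and the expected
# return shapes are assumptions inferred from how data_matrix is used inside
# the function; the 10x200 demo matrix is made up for illustration.
if __name__ == "__main__":
    demo_data = matrix(rand_array(10, 200))   # 10 variables, 200 observations
    pcs, scores = em_pca(demo_data, n_pcs=3)
    print(pcs.shape)     # expected (10, 3): one column per principal component
    print(scores.shape)  # expected (3, 200): the data projected onto the PCs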
Example #3
import datetime
import os
from weakref import ref

from numpy import array_split, short, fromstring
from numpy.random import rand as rand_array

import anfft

import RPi.GPIO as GPIO


pins = [0, 1, 4, 7, 8, 9, 10, 11, 14, 15, 17, 18, 21, 22, 23, 24, 25]

# Warm up ANFFT, allowing it to determine which FFT algorithm will work fastest on this machine.
anfft.fft(rand_array(1024), measure=True)
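

# Hypothetical illustration (not part of the original class) of how the imports
# above could combine to turn a chunk of raw 16-bit audio into per-band levels:
# decode the bytes with fromstring/short, take FFT magnitudes with anfft, and
# group them into bands with array_split.  The helper name, raw_chunk, and
# n_bands are placeholders, not names from the original code.
def _example_spectrum(raw_chunk, n_bands=16):
    samples = fromstring(raw_chunk, dtype=short)
    magnitudes = abs(anfft.fft(samples))
    return [band.mean() for band in array_split(magnitudes, n_bands)]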


class SpectrumAnalyzer(object):
    def __init__(self, messageQueue, config):
        self.messageQueue = messageQueue
        self._dataSinceLastSpectrum = []

        self.loadSettings(config)

        self.lights = LightController(self, config)

    def loadSettings(self, gcp):
        # The number of spectrum analyzer bands (also light output channels) to use.
        self.frequencyBands = int(gcp.get('main', 'frequencyBands', 16))