Example #1
def center_matrix(M, dim=0):
    """
    Return the matrix M with each row having zero mean and unit std

    if dim=1, center columns rather than rows
    """
    # todo: implement this without a loop
    if dim == 1: M = transpose(M)
    M = array(M, Float)
    if len(M.shape) == 1 or M.shape[0] == 1 or M.shape[1] == 1:
        M = M - mean(M)
        sigma = std(M)
        if sigma > 0:
            M = divide(M, sigma)
        if dim == 1: M = transpose(M)
        return M

    for i in range(M.shape[0]):
        M[i] -= mean(M[i])
        sigma = std(M[i])
        if sigma > 0:
            M[i] = divide(M[i], sigma)
    if dim == 1: M = transpose(M)
    return M
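
The loop in center_matrix can be avoided entirely, as the todo suggests. A minimal modern-NumPy sketch of the same row/column standardization (center_matrix_np is an illustrative name, not part of the original module; it assumes a 2-D input):

import numpy as np

def center_matrix_np(M, dim=0):
    # standardize rows (dim=0) or columns (dim=1) to zero mean, unit std
    M = np.asarray(M, dtype=float)
    axis = 1 if dim == 0 else 0
    mu = M.mean(axis=axis, keepdims=True)
    sigma = M.std(axis=axis, keepdims=True)
    # guard against zero std, mirroring the sigma > 0 check above
    return (M - mu) / np.where(sigma > 0, sigma, 1.0)
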
Example #2
def center_matrix(M, dim=0):
    """
    Return the matrix M with each row having zero mean and unit std

    if dim=1, center columns rather than rows
    """
    # todo: implement this without a loop
    if dim == 1: M = transpose(M)
    M = array(M, Float)
    if len(M.shape) == 1 or M.shape[0] == 1 or M.shape[1] == 1:
        M = M - mean(M)
        sigma = std(M)
        if sigma > 0:
            M = divide(M, sigma)
        if dim == 1: M = transpose(M)
        return M

    for i in range(M.shape[0]):
        M[i] -= mean(M[i])
        sigma = std(M[i])
        if sigma > 0:
            M[i] = divide(M[i], sigma)
    if dim == 1: M = transpose(M)
    return M
Example #3
    def __call__(self, value):

        vmin = self.vmin
        vmax = self.vmax

        if type(value) in [IntType, FloatType]:
            vtype = 'scalar'
            val = array([value])
        else:
            vtype = 'array'
            val = array(value)
        if vmin is None or vmax is None:
            rval = ravel(val)
            if vmin is None:
                vmin = min(rval)
            if vmax is None:
                vmax = max(rval)
        if vmin > vmax:
            raise ValueError("minvalue must be less than or equal to maxvalue")
        elif vmin==vmax:
            return 0.*value
        else:
            val = where(val<vmin, vmin, val)
            val = where(val>vmax, vmax, val)
            result = divide(val-vmin, vmax-vmin)
        if vtype == 'scalar':
            result = result[0]
        return result
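
The clip-and-rescale logic above condenses to a few lines of modern NumPy. A sketch (normalize_np is a hypothetical stand-in for this method, not a matplotlib API):

import numpy as np

def normalize_np(value, vmin=None, vmax=None):
    # map value into [0, 1], clipping to [vmin, vmax] first
    val = np.asarray(value, dtype=float)
    vmin = val.min() if vmin is None else vmin
    vmax = val.max() if vmax is None else vmax
    if vmin > vmax:
        raise ValueError("minvalue must be less than or equal to maxvalue")
    if vmin == vmax:
        return 0.0 * val
    return (np.clip(val, vmin, vmax) - vmin) / (vmax - vmin)
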
Example #4
def psd(x, NFFT=256, Fs=2, detrend=detrend_none,
        window=window_hanning, noverlap=0):
    """
    The power spectral density by Welch's average periodogram method.
    The vector x is divided into NFFT length segments.  Each segment
    is detrended by function detrend and windowed by function window.
    noverlap gives the length of the overlap between segments.  The
    absolute(fft(segment))**2 of each segment are averaged to compute Pxx,
    with a scaling to correct for power loss due to windowing.  Fs is
    the sampling frequency.

    -- NFFT must be a power of 2
    -- detrend and window are functions, unlike in MATLAB where they are
       vectors.
    -- if len(x) < NFFT, it will be zero padded to NFFT
    

    Returns the tuple Pxx, freqs

    Refs:
      Bendat & Piersol -- Random Data: Analysis and Measurement
        Procedures, John Wiley & Sons (1986)

    """

    # NFFT & (NFFT - 1) is zero only when NFFT is a power of 2
    if NFFT & (NFFT - 1):
        raise ValueError('NFFT must be a power of 2')

    # zero pad x up to NFFT if it is shorter than NFFT
    if len(x)<NFFT:
        n = len(x)
        x = resize(x, (NFFT,))
        x[n:] = 0
    

    # for real x, ignore the negative frequencies
    if x.typecode()==Complex: numFreqs = NFFT
    else: numFreqs = NFFT//2+1
        
    windowVals = window(ones((NFFT,),x.typecode()))
    step = NFFT-noverlap
    ind = range(0,len(x)-NFFT+1,step)
    n = len(ind)
    Pxx = zeros((numFreqs,n), Float)
    # do the ffts of the slices
    for i in range(n):
        thisX = x[ind[i]:ind[i]+NFFT]
        thisX = windowVals*detrend(thisX)
        fx = absolute(fft(thisX))**2
        Pxx[:,i] = divide(fx[:numFreqs], norm(windowVals)**2)

    # Scale the spectrum by the norm of the window to compensate for
    # windowing loss; see Bendat & Piersol Sec 11.5.2
    if n > 1:
        Pxx = mean(Pxx, 1)

    freqs = Fs/NFFT*arange(numFreqs)
    Pxx.shape = len(freqs),

    return Pxx, freqs
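
For a modern cross-check, scipy.signal.welch implements the same Welch average periodogram. A usage sketch, assuming SciPy is available (welch applies a density scaling by default, so values differ from the raw Pxx above by a constant factor):

import numpy as np
from scipy import signal

Fs = 2.0
t = np.arange(4096) / Fs
x = np.sin(2 * np.pi * 0.4 * t) + 0.1 * np.random.randn(len(t))
# Hanning-windowed 256-point segments with no overlap, as in psd above;
# note welch returns (freqs, Pxx) while psd above returns (Pxx, freqs)
freqs, Pxx = signal.welch(x, fs=Fs, window='hann', nperseg=256, noverlap=0)
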
Example #5
    def __call__(self, value):

        vmin = self.vmin
        vmax = self.vmax
        if type(value) in [IntType, FloatType]:
            vtype = 'scalar'
            val = array([value])
        else:
            vtype = 'array'
            val = array(value)
        if vmin is None or vmax is None:
            rval = ravel(val)
            if vmin is None:
                vmin = min(rval)
            if vmax is None:
                vmax = max(rval)
        if vmin > vmax:
            raise ValueError("minvalue must be less than or equal to maxvalue")
        elif vmin==vmax:
            return 0.*value
        else:
            val = where(val<vmin, vmin, val)
            val = where(val>vmax, vmax, val)
            result = divide(val-vmin, vmax-vmin)
        if vtype == 'scalar':
            result = result[0]
        return result
Example #6
def cohere(x, y, NFFT=256, Fs=2, detrend=detrend_none,
           window=window_hanning, noverlap=0):
    """
    Compute the coherence between x and y.  Coherence is the normalized
    cross spectral density

    Cxy = |Pxy|^2/(Pxx*Pyy)

    The return value is (Cxy, f), where f are the frequencies of the
    coherence vector.  See the docs for psd and csd for information
    about the function arguments NFFT, detrend, window, noverlap, as
    well as the methods used to compute Pxy, Pxx and Pyy.

    Returns the tuple Cxy, freqs

    """
    
    if len(x) < 2 * NFFT:
        raise RuntimeError('Coherence is calculated by averaging over NFFT '
                           'length segments.  Your signal is too short for '
                           'your choice of NFFT')
    Pxx, f = psd(x, NFFT, Fs, detrend, window, noverlap)
    Pyy, f = psd(y, NFFT, Fs, detrend, window, noverlap)
    Pxy, f = csd(x, y, NFFT, Fs, detrend, window, noverlap)

    Cxy = divide(absolute(Pxy)**2, Pxx*Pyy)
    Cxy.shape = len(f),
    return Cxy, f
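
SciPy exposes the same estimator as scipy.signal.coherence. A sketch, assuming SciPy is available:

import numpy as np
from scipy import signal

Fs = 2.0
x = np.random.randn(4096)
y = x + 0.5 * np.random.randn(4096)      # y shares a component with x
# Cxy = |Pxy|^2 / (Pxx * Pyy), estimated over 256-point segments
f, Cxy = signal.coherence(x, y, fs=Fs, nperseg=256, noverlap=0)
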
Example #7
def entropy(y, bins):
    """
   Return the entropy of the data in y

   \sum p_i log2(p_i) where p_i is the probability of observing y in
   the ith bin of bins.  bins can be a number of bins or a range of
   bins; see hist

   Compare S with analytic calculation for a Gaussian
   x = mu + sigma*randn(200000)
   Sanalytic = 0.5  * ( 1.0 + log(2*pi*sigma**2.0) ) 

   """

    n, bins = hist(y, bins)
    n = n.astype(Float)

    n = take(n, nonzero(n))  # keep only the occupied bins

    p = divide(n, len(y))

    delta = bins[1] - bins[0]
    S = -1.0 * asum(p * log(p)) + log(delta)
    #S = -1.0*asum(p*log(p))
    return S
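
The same estimate in self-contained modern NumPy, including the Gaussian check the docstring suggests (entropy_np is an illustrative name):

import numpy as np

def entropy_np(y, bins):
    # histogram estimate: S = -sum(p * log(p)) + log(bin width)
    n, edges = np.histogram(y, bins)
    p = n[n > 0] / float(len(y))         # drop empty bins before the log
    delta = edges[1] - edges[0]
    return -np.sum(p * np.log(p)) + np.log(delta)

mu, sigma = 0.0, 2.0
x = mu + sigma * np.random.randn(200000)
S = entropy_np(x, 200)
Sanalytic = 0.5 * (1.0 + np.log(2 * np.pi * sigma ** 2))   # S should be close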
Example #8
def entropy(y, bins):
   """
   Return the entropy of the data in y

   \sum p_i log2(p_i) where p_i is the probability of observing y in
   the ith bin of bins.  bins can be a number of bins or a range of
   bins; see hist

   Compare S with analytic calculation for a Gaussian
   x = mu + sigma*randn(200000)
   Sanalytic = 0.5  * ( 1.0 + log(2*pi*sigma**2.0) ) 

   """

   
   n,bins = hist(y, bins)
   n = n.astype(Float)

   n = take(n, nonzero(n))         # get the positive

   p = divide(n, len(y))

   delta = bins[1]-bins[0]
   S = -1.0*asum(p*log(p)) + log(delta)
   #S = -1.0*asum(p*log(p))
   return S
Example #9
def cohere(x,
           y,
           NFFT=256,
           Fs=2,
           detrend=detrend_none,
           window=window_hanning,
           noverlap=0):
    """
    Compute the coherence between x and y.  Coherence is the normalized
    cross spectral density

    Cxy = |Pxy|^2/(Pxx*Pyy)

    The return value is (Cxy, f), where f are the frequencies of the
    coherence vector.  See the docs for psd and csd for information
    about the function arguments NFFT, detrend, window, noverlap, as
    well as the methods used to compute Pxy, Pxx and Pyy.

    Returns the tuple Cxy, freqs

    """

    if len(x) < 2 * NFFT:
        raise RuntimeError(
            'Coherence is calculated by averaging over NFFT length segments.  Your signal is too short for your choice of NFFT'
        )
    Pxx, f = psd(x, NFFT, Fs, detrend, window, noverlap)
    Pyy, f = psd(y, NFFT, Fs, detrend, window, noverlap)
    Pxy, f = csd(x, y, NFFT, Fs, detrend, window, noverlap)

    Cxy = divide(absolute(Pxy)**2, Pxx * Pyy)
    Cxy.shape = len(f),
    return Cxy, f
Example #10
def psd(x, NFFT=256, Fs=2, detrend=detrend_none,
        window=window_hanning, noverlap=0):
    """
    The power spectral density by Welch's average periodogram method.
    The vector x is divided into NFFT length segments.  Each segment
    is detrended by function detrend and windowed by function window.
    noverlap gives the length of the overlap between segments.  The
    absolute(fft(segment))**2 of each segment are averaged to compute Pxx,
    with a scaling to correct for power loss due to windowing.  Fs is
    the sampling frequency.

    -- NFFT must be a power of 2
    -- detrend and window are functions, unlike in MATLAB where they are
       vectors.
    -- if len(x) < NFFT, it will be zero padded to NFFT
    

    Returns the tuple Pxx, freqs

    Refs:
      Bendat & Piersol -- Random Data: Analysis and Measurement
        Procedures, John Wiley & Sons (1986)

    """

    # NFFT & (NFFT - 1) is zero only when NFFT is a power of 2
    if NFFT & (NFFT - 1):
        raise ValueError('NFFT must be a power of 2')

    # zero pad x up to NFFT if it is shorter than NFFT
    if len(x)<NFFT:
        n = len(x)
        x = resize(x, (NFFT,))
        x[n:] = 0
    

    # for real x, ignore the negative frequencies
    if x.typecode()==Complex: numFreqs = NFFT
    else: numFreqs = NFFT//2+1
        
    windowVals = window(ones((NFFT,),x.typecode()))
    step = NFFT-noverlap
    ind = range(0,len(x)-NFFT+1,step)
    n = len(ind)
    Pxx = zeros((numFreqs,n), Float)
    # do the ffts of the slices
    for i in range(n):
        thisX = x[ind[i]:ind[i]+NFFT]
        thisX = windowVals*detrend(thisX)
        fx = absolute(fft(thisX))**2
        Pxx[:,i] = divide(fx[:numFreqs], norm(windowVals)**2)

    # Scale the spectrum by the norm of the window to compensate for
    # windowing loss; see Bendat & Piersol Sec 11.5.2
    if n > 1:
        Pxx = mean(Pxx, 1)

    freqs = Fs/NFFT*arange(numFreqs)
    Pxx.shape = len(freqs),

    return Pxx, freqs
Example #11
def makeMappingArray(N, data):
    """Create an N-element 1-d lookup table

    data represented by a list of x,y0,y1 mapping correspondences.
    Each element in this list represents how a value between 0 and 1
    (inclusive) represented by x is mapped to a corresponding value
    between 0 and 1 (inclusive). The two values of y are to allow
    for discontinuous mapping functions (say as might be found in a
    sawtooth) where y0 represents the value of y for values of x
    <= to that given, and y1 is the value to be used for x > than
    that given). The list must start with x=0, end with x=1, and
    all values of x must be in increasing order. Values between
    the given mapping points are determined by simple linear interpolation.

    The function returns an array "result" where result[x*(N-1)]
    gives the closest value for values of x between 0 and 1.
    """
    try:
        adata = array(data)
    except Exception:
        raise TypeError("data must be convertible to an array")
    shape = adata.shape
    if len(shape) != 2 or shape[1] != 3:
        raise ValueError("data must be nx3 format")

    x  = adata[:,0]
    y0 = adata[:,1]
    y1 = adata[:,2]

    if x[0] != 0. or x[-1] != 1.0:
        raise ValueError(
           "data mapping points must start with x=0. and end with x=1")
    if sometrue(sort(x)-x):
        raise ValueError(
           "data mapping points must have x in increasing order")
    # begin generation of lookup table
    x = x * (N-1)
    lut = zeros((N,), Float)
    xind = arange(float(N))
    ind = searchsorted(x, xind)[1:-1]

    lut[1:-1] = ( divide(xind[1:-1] - take(x,ind-1),
                         take(x,ind)-take(x,ind-1) )
                  *(take(y0,ind)-take(y1,ind-1)) + take(y1,ind-1))
    lut[0] = y1[0]
    lut[-1] = y0[-1]
    # ensure that the lut is confined to values between 0 and 1 by clipping it
    lut = clip(lut, 0.0, 1.0)
    #lut = where(lut > 1., 1., lut)
    #lut = where(lut < 0., 0., lut)
    return lut
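
A usage sketch assembled from the docstring's contract, assuming the function above is in scope (the mapping points are illustrative):

# a ramp with a jump at x = 0.5, exercising the y0/y1 discontinuity
data = [(0.0, 0.0, 0.0),
        (0.5, 0.4, 0.6),    # y0=0.4 applies for x <= 0.5, y1=0.6 for x > 0.5
        (1.0, 1.0, 1.0)]
lut = makeMappingArray(256, data)
# lut[int(round(v * 255))] approximates the mapped value for v in [0, 1]
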
Example #12
def specgram(x,
             NFFT=256,
             Fs=2,
             detrend=detrend_none,
             window=window_hanning,
             noverlap=128):
    """
    Compute a spectrogram of data in x.  Data are split into NFFT
    length segments and the PSD of each section is computed.  The
    windowing function window is applied to each segment, and the
    amount of overlap of each segment is specified with noverlap

    See pdf for more info.

    The returned times are the midpoints of the intervals over which
    the ffts are calculated
    """
    x = asarray(x)
    assert (NFFT > noverlap)
    if log(NFFT) / log(2) != int(log(NFFT) / log(2)):
        raise ValueError('NFFT must be a power of 2')

    # zero pad x up to NFFT if it is shorter than NFFT
    if len(x) < NFFT:
        n = len(x)
        x = resize(x, (NFFT, ))
        x[n:] = 0

    # for real x, ignore the negative frequencies
    if typecode(x) == Complex: numFreqs = NFFT
    else: numFreqs = NFFT // 2 + 1

    windowVals = window(ones((NFFT, ), typecode(x)))
    step = NFFT - noverlap
    ind = arange(0, len(x) - NFFT + 1, step)
    n = len(ind)
    Pxx = zeros((numFreqs, n), Float)
    # do the ffts of the slices

    for i in range(n):
        thisX = x[ind[i]:ind[i] + NFFT]
        thisX = windowVals * detrend(thisX)
        fx = absolute(fft(thisX))**2
        # Scale the spectrum by the norm of the window to compensate for
        # windowing loss; see Bendat & Piersol Sec 11.5.2
        Pxx[:, i] = divide(fx[:numFreqs], norm(windowVals)**2)
    t = 1 / Fs * (ind + NFFT / 2)
    freqs = Fs / NFFT * arange(numFreqs)

    return Pxx, freqs, t
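
scipy.signal.spectrogram covers the same ground in modern SciPy. A sketch, assuming SciPy is available:

import numpy as np
from scipy import signal

Fs = 2.0
x = np.sin(2 * np.pi * 0.3 * np.arange(4096) / Fs)
# Hanning window, 256-point segments, 128-point overlap, as in specgram above;
# the return order is (freqs, times, Pxx) rather than (Pxx, freqs, t)
f, t, Sxx = signal.spectrogram(x, fs=Fs, window='hann', nperseg=256,
                               noverlap=128)
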
Example #13
def makeMappingArray(N, data):
    """Create an N-element 1-d lookup table
    
    data represented by a list of x,y0,y1 mapping correspondences.
    Each element in this list represents how a value between 0 and 1
    (inclusive) represented by x is mapped to a corresponding value
    between 0 and 1 (inclusive). The two values of y are to allow 
    for discontinuous mapping functions (say as might be found in a
    sawtooth) where y0 represents the value of y for values of x
    <= to that given, and y1 is the value to be used for x > than
    that given). The list must start with x=0, end with x=1, and 
    all values of x must be in increasing order. Values between
    the given mapping points are determined by simple linear interpolation.
    
    The function returns an array "result" where result[x*(N-1)]
    gives the closest value for values of x between 0 and 1.
    """
    try:
        adata = array(data)
    except Exception:
        raise TypeError("data must be convertible to an array")
    shape = adata.shape
    if len(shape) != 2 or shape[1] != 3:
        raise ValueError("data must be nx3 format")

    x  = adata[:,0]
    y0 = adata[:,1]
    y1 = adata[:,2]

    if x[0] != 0. or x[-1] != 1.0:
        raise ValueError(
           "data mapping points must start with x=0. and end with x=1")
    if sometrue(sort(x)-x):
        raise ValueError(
           "data mapping points must have x in increasing order")
    # begin generation of lookup table
    x = x * (N-1)
    lut = zeros((N,), Float)
    xind = arange(float(N))
    ind = searchsorted(x, xind)[1:-1]
    
    lut[1:-1] = ( divide(xind[1:-1] - take(x,ind-1),
                         take(x,ind)-take(x,ind-1) )
                  *(take(y0,ind)-take(y1,ind-1)) + take(y1,ind-1))
    lut[0] = y1[0]
    lut[-1] = y0[-1]
    # ensure that the lut is confined to values between 0 and 1 by clipping it
    lut = where(lut > 1., 1., lut)
    lut = where(lut < 0., 0., lut)
    return lut
Example #14
def specgram(x, NFFT=256, Fs=2, detrend=detrend_none,
             window=window_hanning, noverlap=128):
    """
    Compute a spectrogram of data in x.  Data are split into NFFT
    length segments and the PSD of each section is computed.  The
    windowing function window is applied to each segment, and the
    amount of overlap of each segment is specified with noverlap

    See pdf for more info.

    The returned times are the midpoints of the intervals over which
    the ffts are calculated
    """

    assert(NFFT > noverlap)
    if log(NFFT) / log(2) != int(log(NFFT) / log(2)):
        raise ValueError('NFFT must be a power of 2')

    # zero pad x up to NFFT if it is shorter than NFFT
    if len(x)<NFFT:
        n = len(x)
        x = resize(x, (NFFT,))
        x[n:] = 0
    

    # for real x, ignore the negative frequencies
    if x.typecode()==Complex: numFreqs = NFFT
    else: numFreqs = NFFT//2+1
        
    windowVals = window(ones((NFFT,),x.typecode()))
    step = NFFT-noverlap
    ind = arange(0,len(x)-NFFT+1,step)
    n = len(ind)
    Pxx = zeros((numFreqs,n), Float)
    # do the ffts of the slices

    for i in range(n):
        thisX = x[ind[i]:ind[i]+NFFT]
        thisX = windowVals*detrend(thisX)
        fx = absolute(fft(thisX))**2
        # Scale the spectrum by the norm of the window to compensate for
        # windowing loss; see Bendat & Piersol Sec 11.5.2
        Pxx[:,i] = divide(fx[:numFreqs], norm(windowVals)**2)
    t = 1/Fs*(ind+NFFT/2)
    freqs = Fs/NFFT*arange(numFreqs)

    return Pxx, freqs, t
Example #15
def corrcoef(*args):
    """
    corrcoef(X) where X is a matrix returns a matrix of correlation
    coefficients for each numrows observations and numcols variables.
    
    corrcoef(x,y) where x and y are vectors returns the matrix of
    correlation coefficients for x and y.

    Numeric arrays can be real or complex

    The correlation matrix is defined from the covariance matrix C as

    r(i,j) = C[i,j] / sqrt(C[i,i]*C[j,j])
    """

    if len(args) == 2:
        X = transpose(array([args[0]] + [args[1]]))
    elif len(args) == 1:
        X = args[0]
    else:
        raise RuntimeError('Only expecting 1 or 2 arguments')

    C = cov(X)

    if len(args) == 2:
        d = resize(diagonal(C), (2, 1))
        denom = numerix.mlab.sqrt(matrixmultiply(d, transpose(d)))
    else:
        dc = diagonal(C)
        N = len(dc)
        shape = N, N
        vi = resize(dc, shape)
        denom = numerix.mlab.sqrt(vi *
                                  transpose(vi))  # element wise multiplication

    r = divide(C, denom)
    try:
        return r.real
    except AttributeError:
        return r
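
NumPy's built-in np.corrcoef applies the same definition. A quick sketch:

import numpy as np

x = np.random.randn(100)
y = 2.0 * x + np.random.randn(100)
# r(i,j) = C[i,j] / sqrt(C[i,i]*C[j,j]), as in the docstring above
r = np.corrcoef(x, y)       # 2x2 matrix; r[0, 1] is the x-y correlation
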
Example #16
def corrcoef(*args):
    """
    corrcoef(X) where X is a matrix returns a matrix of correlation
    coefficients for each numrows observations and numcols variables.
    
    corrcoef(x,y) where x and y are vectors returns the matrix of
    correlation coefficients for x and y.

    Numeric arrays can be real or complex

    The correlation matrix is defined from the covariance matrix C as

    r(i,j) = C[i,j] / sqrt(C[i,i]*C[j,j])
    """


    if len(args)==2:
        X = transpose(array([args[0]]+[args[1]]))
    elif len(args)==1:
        X = args[0]
    else:
        raise RuntimeError('Only expecting 1 or 2 arguments')

    
    C = cov(X)

    if len(args)==2:
        d = resize(diagonal(C), (2,1))
        denom = numerix.mlab.sqrt(matrixmultiply(d,transpose(d)))
    else:
        dc = diagonal(C)
        N = len(dc)
        shape = N,N
        vi = resize(dc, shape)
        denom = numerix.mlab.sqrt(vi*transpose(vi)) # element wise multiplication
       

    r = divide(C,denom)
    try: return r.real
    except AttributeError: return r
Example #17
def prepca(P, frac=0):
    """
    Compute the principal components of P.  P is a numVars x
    numObservations numeric array.  frac is the minimum fraction of
    variance that a component must contain to be included

    Return values are
    Pcomponents : a num components x num observations numeric array
    Trans       : the weights matrix, ie, Pcomponents = Trans*P
    fracVar     : the fraction of the variance accounted for by each
                  component returned
    """
    U,s,v = svd(P)
    varEach = s**2/P.shape[1]
    totVar = asum(varEach)
    fracVar = divide(varEach,totVar)
    ind = int(asum(fracVar>=frac))

    # select the components that are greater
    Trans = transpose(U[:,:ind])
    # The transformed data
    Pcomponents = matrixmultiply(Trans,P)
    return Pcomponents, Trans, fracVar[:ind]
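
The same decomposition in self-contained modern NumPy (prepca_np is an illustrative name; the behavior mirrors the function above):

import numpy as np

def prepca_np(P, frac=0):
    # P is numVars x numObservations; rows are variables
    U, s, v = np.linalg.svd(P, full_matrices=False)
    varEach = s ** 2 / P.shape[1]
    fracVar = varEach / varEach.sum()
    ind = int((fracVar >= frac).sum())   # s is sorted, so this picks a prefix
    Trans = U[:, :ind].T                 # weights: Pcomponents = Trans @ P
    return Trans @ P, Trans, fracVar[:ind]
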
Example #18
def prepca(P, frac=0):
    """
    Compute the principal components of P.  P is a numVars x
    numObservations numeric array.  frac is the minimum fraction of
    variance that a component must contain to be included

    Return values are
    Pcomponents : a num components x num observations numeric array
    Trans       : the weights matrix, ie, Pcomponents = Trans*P
    fracVar     : the fraction of the variance accounted for by each
                  component returned
    """
    U, s, v = svd(P)
    varEach = s**2 / P.shape[1]
    totVar = asum(varEach)
    fracVar = divide(varEach, totVar)
    ind = int(asum(fracVar >= frac))

    # select the components that are greater
    Trans = transpose(U[:, :ind])
    # The transformed data
    Pcomponents = matrixmultiply(Trans, P)
    return Pcomponents, Trans, fracVar[:ind]
Example #19
def csd(x,
        y,
        NFFT=256,
        Fs=2,
        detrend=detrend_none,
        window=window_hanning,
        noverlap=0):
    """
    The cross spectral density Pxy by Welch's average periodogram
    method.  The vectors x and y are divided into NFFT length
    segments.  Each segment is detrended by function detrend and
    windowed by function window.  noverlap gives the length of the
    overlap between segments.  The product of the direct FFTs of x and
    y are averaged over each segment to compute Pxy, with a scaling to
    correct for power loss due to windowing.  Fs is the sampling
    frequency.

    NFFT must be a power of 2

    Returns the tuple Pxy, freqs

    

    Refs:
      Bendat & Piersol -- Random Data: Analysis and Measurement
        Procedures, John Wiley & Sons (1986)

    """

    # NFFT & (NFFT - 1) is zero only when NFFT is a power of 2
    if NFFT & (NFFT - 1):
        raise ValueError('NFFT must be a power of 2')

    # zero pad x and y up to NFFT if they are shorter than NFFT
    if len(x) < NFFT:
        n = len(x)
        x = resize(x, (NFFT, ))
        x[n:] = 0
    if len(y) < NFFT:
        n = len(y)
        y = resize(y, (NFFT, ))
        y[n:] = 0

    # for real x, ignore the negative frequencies
    if typecode(x) == Complex: numFreqs = NFFT
    else: numFreqs = NFFT // 2 + 1

    windowVals = window(ones((NFFT, ), typecode(x)))
    step = NFFT - noverlap
    ind = range(0, len(x) - NFFT + 1, step)
    n = len(ind)
    Pxy = zeros((numFreqs, n), Complex)

    # do the ffts of the slices
    for i in range(n):
        thisX = x[ind[i]:ind[i] + NFFT]
        thisX = windowVals * detrend(thisX)
        thisY = y[ind[i]:ind[i] + NFFT]
        thisY = windowVals * detrend(thisY)
        fx = fft(thisX)
        fy = fft(thisY)
        Pxy[:, i] = conjugate(fx[:numFreqs]) * fy[:numFreqs]

    # Scale the spectrum by the norm of the window to compensate for
    # windowing loss; see Bendat & Piersol Sec 11.5.2
    if n > 1: Pxy = mean(Pxy, 1)
    Pxy = divide(Pxy, norm(windowVals)**2)
    freqs = Fs / NFFT * arange(numFreqs)
    Pxy.shape = len(freqs),
    return Pxy, freqs
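
scipy.signal.csd is the modern counterpart. A sketch, assuming SciPy is available:

import numpy as np
from scipy import signal

Fs = 2.0
x = np.random.randn(4096)
y = np.roll(x, 3)                        # y is x delayed by three samples
f, Pxy = signal.csd(x, y, fs=Fs, window='hann', nperseg=256, noverlap=0)
phase = np.arctan2(Pxy.imag, Pxy.real)   # cross-spectral phase
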
Example #20
def cohere_pairs(X,
                 ij,
                 NFFT=256,
                 Fs=2,
                 detrend=detrend_none,
                 window=window_hanning,
                 noverlap=0,
                 preferSpeedOverMemory=True,
                 progressCallback=donothing_callback,
                 returnPxx=False):
    """
    Cxy, Phase, freqs = cohere_pairs( X, ij, ...)
    
    Compute the coherence for all pairs in ij.  X is a
    numSamples,numCols Numeric array.  ij is a list of tuples (i,j).
    Each tuple is a pair of indexes into the columns of X for which
    you want to compute coherence.  For example, if X has 64 columns,
    and you want to compute all nonredundant pairs, define ij as

      ij = []
      for i in range(64):
          for j in range(i+1,64):
              ij.append( (i,j) )

    The other function arguments, except for 'preferSpeedOverMemory'
    (see below), are explained in the help string of 'psd'.

    Return value is a tuple (Cxy, Phase, freqs).

      Cxy -- a dictionary of (i,j) tuples -> coherence vector for that
        pair.  Ie, Cxy[(i,j)] = cohere(X[:,i], X[:,j]).  Number of
        dictionary keys is len(ij)
      
      Phase -- a dictionary of phases of the cross spectral density at
        each frequency for each pair.  keys are (i,j).

      freqs -- a vector of frequencies, equal in length to either the
        coherence or phase vectors for any i,j key.  Eg, to make a coherence
        Bode plot:

          subplot(211)
          plot( freqs, Cxy[(12,19)])
          subplot(212)
          plot( freqs, Phase[(12,19)])
      
    For a large number of pairs, cohere_pairs can be much more
    efficient than just calling cohere for each pair, because it
    caches most of the intensive computations.  If N is the number of
    pairs, this function is O(N) for most of the heavy lifting,
    whereas calling cohere for each pair is O(N^2).  However, because
    of the caching, it is also more memory intensive, making 2
    additional complex arrays with approximately the same number of
    elements as X.

    The parameter 'preferSpeedOverMemory', if false, limits the
    caching by only making one, rather than two, complex cache arrays.
    This is useful if memory becomes critical.  Even when
    preferSpeedOverMemory is false, cohere_pairs will still give
    significant performance gains over calling cohere for each pair,
    and will use substantially less memory than if
    preferSpeedOverMemory is true.  In my tests with a 43000,64 array
    over all nonredundant pairs, preferSpeedOverMemory=1 delivered a
    33% performance boost on a 1.7GHz Athlon with 512MB RAM compared
    with preferSpeedOverMemory=0.  But both solutions were more than
    10x faster than naively crunching all possible pairs through
    cohere.

    See test/cohere_pairs_test.py in the src tree for an example
    script that shows that this cohere_pairs and cohere give the same
    results for a given pair.

    """
    numRows, numCols = X.shape

    # zero pad if X is too short
    if numRows < NFFT:
        tmp = X
        X = zeros((NFFT, numCols), typecode(X))
        X[:numRows, :] = tmp
        del tmp

    numRows, numCols = X.shape
    # get all the columns of X that we are interested in by checking
    # the ij tuples
    seen = {}
    for i, j in ij:
        seen[i] = 1
        seen[j] = 1
    allColumns = seen.keys()
    Ncols = len(allColumns)
    del seen

    # for real X, ignore the negative frequencies
    if typecode(X) == Complex: numFreqs = NFFT
    else: numFreqs = NFFT // 2 + 1

    # cache the FFT of every windowed, detrended NFFT length segment
    # of every channel.  If preferSpeedOverMemory, cache the conjugate
    # as well
    windowVals = window(ones((NFFT, ), typecode(X)))
    ind = range(0, numRows - NFFT + 1, NFFT - noverlap)
    numSlices = len(ind)
    FFTSlices = {}
    FFTConjSlices = {}
    Pxx = {}
    slices = range(numSlices)
    normVal = norm(windowVals)**2
    for k, iCol in enumerate(allColumns):
        progressCallback(k / Ncols, 'Caching FFTs')
        Slices = zeros((numSlices, numFreqs), Complex)
        for iSlice in slices:
            thisSlice = X[ind[iSlice]:ind[iSlice] + NFFT, iCol]
            thisSlice = windowVals * detrend(thisSlice)
            Slices[iSlice, :] = fft(thisSlice)[:numFreqs]

        FFTSlices[iCol] = Slices
        if preferSpeedOverMemory:
            FFTConjSlices[iCol] = conjugate(Slices)
        Pxx[iCol] = divide(mean(absolute(Slices)**2), normVal)
    del Slices, ind, windowVals

    # compute the coherences and phases for all pairs using the
    # cached FFTs
    Cxy = {}
    Phase = {}
    count = 0
    N = len(ij)
    for i, j in ij:
        count += 1
        if count % 10 == 0:
            progressCallback(count / N, 'Computing coherences')

        if preferSpeedOverMemory:
            Pxy = FFTSlices[i] * FFTConjSlices[j]
        else:
            Pxy = FFTSlices[i] * conjugate(FFTSlices[j])
        if numSlices > 1: Pxy = mean(Pxy)
        Pxy = divide(Pxy, normVal)
        Cxy[(i, j)] = divide(absolute(Pxy)**2, Pxx[i] * Pxx[j])
        Phase[(i, j)] = arctan2(Pxy.imag, Pxy.real)

    freqs = Fs / NFFT * arange(numFreqs)
    if returnPxx:
        return Cxy, Phase, freqs, Pxx
    else:
        return Cxy, Phase, freqs
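
Condensing the docstring's example into one runnable call, assuming the function above and its Numeric-era helpers are importable (the data array is a stand-in):

import numpy as np

X = np.random.randn(10000, 64)           # stand-in data: numSamples x numCols
# all nonredundant column pairs, as in the docstring
ij = [(i, j) for i in range(64) for j in range(i + 1, 64)]
Cxy, Phase, freqs = cohere_pairs(X, ij, NFFT=256, Fs=2)
c, p = Cxy[(12, 19)], Phase[(12, 19)]    # coherence and phase for one pair
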
Example #21
def cohere_pairs( X, ij, NFFT=256, Fs=2, detrend=detrend_none,
                  window=window_hanning, noverlap=0,
                  preferSpeedOverMemory=True,
                  progressCallback=donothing_callback,
                  returnPxx=False):

    """
    Cxy, Phase, freqs = cohere_pairs( X, ij, ...)
    
    Compute the coherence for all pairs in ij.  X is a
    numSamples,numCols Numeric array.  ij is a list of tuples (i,j).
    Each tuple is a pair of indexes into the columns of X for which
    you want to compute coherence.  For example, if X has 64 columns,
    and you want to compute all nonredundant pairs, define ij as

      ij = []
      for i in range(64):
          for j in range(i+1,64):
              ij.append( (i,j) )

    The other function arguments, except for 'preferSpeedOverMemory'
    (see below), are explained in the help string of 'psd'.

    Return value is a tuple (Cxy, Phase, freqs).

      Cxy -- a dictionary of (i,j) tuples -> coherence vector for that
        pair.  Ie, Cxy[(i,j)] = cohere(X[:,i], X[:,j]).  Number of
        dictionary keys is len(ij)
      
      Phase -- a dictionary of phases of the cross spectral density at
        each frequency for each pair.  keys are (i,j).

      freqs -- a vector of frequencies, equal in length to either the
        coherence or phase vectors for any i,j key.  Eg, to make a coherence
        Bode plot:

          subplot(211)
          plot( freqs, Cxy[(12,19)])
          subplot(212)
          plot( freqs, Phase[(12,19)])
      
    For a large number of pairs, cohere_pairs can be much more
    efficient than just calling cohere for each pair, because it
    caches most of the intensive computations.  If N is the number of
    pairs, this function is O(N) for most of the heavy lifting,
    whereas calling cohere for each pair is O(N^2).  However, because
    of the caching, it is also more memory intensive, making 2
    additional complex arrays with approximately the same number of
    elements as X.

    The parameter 'preferSpeedOverMemory', if false, limits the
    caching by only making one, rather than two, complex cache arrays.
    This is useful if memory becomes critical.  Even when
    preferSpeedOverMemory is false, cohere_pairs will still give
    significant performance gains over calling cohere for each pair,
    and will use substantially less memory than if
    preferSpeedOverMemory is true.  In my tests with a 43000,64 array
    over all nonredundant pairs, preferSpeedOverMemory=1 delivered a
    33% performance boost on a 1.7GHz Athlon with 512MB RAM compared
    with preferSpeedOverMemory=0.  But both solutions were more than
    10x faster than naively crunching all possible pairs through
    cohere.

    See test/cohere_pairs_test.py in the src tree for an example
    script that shows that this cohere_pairs and cohere give the same
    results for a given pair.

    """
    numRows, numCols = X.shape

    # zero pad if X is too short
    if numRows < NFFT:
        tmp = X
        X = zeros( (NFFT, numCols), X.typecode())
        X[:numRows,:] = tmp
        del tmp

    numRows, numCols = X.shape
    # get all the columns of X that we are interested in by checking
    # the ij tuples
    seen = {}
    for i,j in ij:
        seen[i]=1; seen[j] = 1
    allColumns = seen.keys()
    Ncols = len(allColumns)
    del seen
    
    # for real X, ignore the negative frequencies
    if X.typecode()==Complex: numFreqs = NFFT
    else: numFreqs = NFFT//2+1

    # cache the FFT of every windowed, detrended NFFT length segment
    # of every channel.  If preferSpeedOverMemory, cache the conjugate
    # as well
    windowVals = window(ones((NFFT,), X.typecode()))
    ind = range(0, numRows-NFFT+1, NFFT-noverlap)
    numSlices = len(ind)
    FFTSlices = {}
    FFTConjSlices = {}
    Pxx = {}
    slices = range(numSlices)
    normVal = norm(windowVals)**2
    for k, iCol in enumerate(allColumns):
        progressCallback(k / Ncols, 'Caching FFTs')
        Slices = zeros( (numSlices,numFreqs), Complex)
        for iSlice in slices:                    
            thisSlice = X[ind[iSlice]:ind[iSlice]+NFFT, iCol]
            thisSlice = windowVals*detrend(thisSlice)
            Slices[iSlice,:] = fft(thisSlice)[:numFreqs]
            
        FFTSlices[iCol] = Slices
        if preferSpeedOverMemory:
            FFTConjSlices[iCol] = conjugate(Slices)
        Pxx[iCol] = divide(mean(absolute(Slices)**2), normVal)
    del Slices, ind, windowVals    

    # compute the coherences and phases for all pairs using the
    # cached FFTs
    Cxy = {}
    Phase = {}
    count = 0
    N = len(ij)
    for i,j in ij:
        count +=1
        if count%10==0:
            progressCallback(count/N, 'Computing coherences')

        if preferSpeedOverMemory:
            Pxy = FFTSlices[i] * FFTConjSlices[j]
        else:
            Pxy = FFTSlices[i] * conjugate(FFTSlices[j])
        if numSlices>1: Pxy = mean(Pxy)
        Pxy = divide(Pxy, normVal)
        Cxy[(i,j)] = divide(absolute(Pxy)**2, Pxx[i]*Pxx[j])
        Phase[(i,j)] =  arctan2(Pxy.imag, Pxy.real)

    freqs = Fs/NFFT*arange(numFreqs)
    if returnPxx:
        return Cxy, Phase, freqs, Pxx
    else:
        return Cxy, Phase, freqs
Example #22
def csd(x, y, NFFT=256, Fs=2, detrend=detrend_none,
        window=window_hanning, noverlap=0):
    """
    The cross spectral density Pxy by Welch's average periodogram
    method.  The vectors x and y are divided into NFFT length
    segments.  Each segment is detrended by function detrend and
    windowed by function window.  noverlap gives the length of the
    overlap between segments.  The product of the direct FFTs of x and
    y are averaged over each segment to compute Pxy, with a scaling to
    correct for power loss due to windowing.  Fs is the sampling
    frequency.

    NFFT must be a power of 2

    Returns the tuple Pxy, freqs

    

    Refs:
      Bendat & Piersol -- Random Data: Analysis and Measurement
        Procedures, John Wiley & Sons (1986)

    """

    # NFFT & (NFFT - 1) is zero only when NFFT is a power of 2
    if NFFT & (NFFT - 1):
        raise ValueError('NFFT must be a power of 2')

    # zero pad x and y up to NFFT if they are shorter than NFFT
    if len(x)<NFFT:
        n = len(x)
        x = resize(x, (NFFT,))
        x[n:] = 0
    if len(y)<NFFT:
        n = len(y)
        y = resize(y, (NFFT,))
        y[n:] = 0

    # for real x, ignore the negative frequencies
    if x.typecode()==Complex: numFreqs = NFFT
    else: numFreqs = NFFT//2+1
        
    windowVals = window(ones((NFFT,),x.typecode()))
    step = NFFT-noverlap
    ind = range(0,len(x)-NFFT+1,step)
    n = len(ind)
    Pxy = zeros((numFreqs,n), Complex)

    # do the ffts of the slices
    for i in range(n):
        thisX = x[ind[i]:ind[i]+NFFT]
        thisX = windowVals*detrend(thisX)
        thisY = y[ind[i]:ind[i]+NFFT]
        thisY = windowVals*detrend(thisY)
        fx = fft(thisX)
        fy = fft(thisY)
        Pxy[:,i] = conjugate(fx[:numFreqs])*fy[:numFreqs]
    # Scale the spectrum by the norm of the window to compensate for
    # windowing loss; see Bendat & Piersol Sec 11.5.2
    if n>1: Pxy = mean(Pxy,1)
    Pxy = divide(Pxy, norm(windowVals)**2)
    freqs = Fs/NFFT*arange(numFreqs)
    Pxy.shape = len(freqs),
    return Pxy, freqs