def set_data(self, x, y, A):
    x = asarray(x).astype(Float32)
    y = asarray(y).astype(Float32)
    A = asarray(A)
    if len(x.shape) != 1 or len(y.shape) != 1 \
           or A.shape[0:2] != (y.shape[0], x.shape[0]):
        raise TypeError("Axes don't match array shape")
    if len(A.shape) not in [2, 3]:
        raise TypeError("Can only plot 2D or 3D data")
    if len(A.shape) == 3 and A.shape[2] not in [1, 3, 4]:
        raise TypeError("3D arrays must have three (RGB) or four (RGBA) color components")
    if len(A.shape) == 3 and A.shape[2] == 1:
        A.shape = A.shape[0:2]
    if len(A.shape) == 2:
        if typecode(A) != UInt8:
            # scalar data: map through norm and colormap to RGBA bytes
            A = (self.cmap(self.norm(A)) * 255).astype(UInt8)
        else:
            # grayscale bytes: replicate to 4 channels, opaque alpha
            A = repeat(A[:, :, NewAxis], 4, 2)
            A[:, :, 3] = 255
    else:
        if typecode(A) != UInt8:
            A = (255 * A).astype(UInt8)
        if A.shape[2] == 3:
            # RGB: append an opaque alpha channel
            B = zeros(tuple(list(A.shape[0:2]) + [4]), UInt8)
            B[:, :, 0:3] = A
            B[:, :, 3] = 255
            A = B
    self._A = A
    self._Ax = x
    self._Ay = y
    self._imcache = None
def psd(x, NFFT=256, Fs=2, detrend=detrend_none,
        window=window_hanning, noverlap=0):
    """
    The power spectral density by Welch's average periodogram method.
    The vector x is divided into NFFT length segments.  Each segment
    is detrended by function detrend and windowed by function window.
    noverlap gives the length of the overlap between segments.  The
    absolute(fft(segment))**2 of each segment are averaged to compute
    Pxx, with a scaling to correct for power loss due to windowing.
    Fs is the sampling frequency.

    -- NFFT must be a power of 2
    -- detrend and window are functions, unlike in matlab where they
       are vectors.
    -- if length x < NFFT, it will be zero padded to NFFT

    Returns the tuple Pxx, freqs

    Refs: Bendat & Piersol -- Random Data: Analysis and Measurement
      Procedures, John Wiley & Sons (1986)
    """
    # require a true power of 2 (NFFT % 2 would only reject odd values)
    if log(NFFT) / log(2) != int(log(NFFT) / log(2)):
        raise ValueError, 'NFFT must be a power of 2'

    # zero pad x up to NFFT if it is shorter than NFFT
    if len(x) < NFFT:
        n = len(x)
        x = resize(x, (NFFT,))
        x[n:] = 0

    # for real x, ignore the negative frequencies
    if typecode(x) == Complex:
        numFreqs = NFFT
    else:
        numFreqs = NFFT // 2 + 1

    windowVals = window(ones((NFFT,), typecode(x)))
    step = NFFT - noverlap
    ind = range(0, len(x) - NFFT + 1, step)
    n = len(ind)
    Pxx = zeros((numFreqs, n), Float)

    # do the ffts of the slices
    for i in range(n):
        thisX = x[ind[i]:ind[i] + NFFT]
        thisX = windowVals * detrend(thisX)
        fx = absolute(fft(thisX))**2
        # Scale the spectrum by the norm of the window to compensate
        # for windowing loss; see Bendat & Piersol Sec 11.5.2
        Pxx[:, i] = divide(fx[:numFreqs], norm(windowVals)**2)

    if n > 1:
        Pxx = mean(Pxx, 1)

    # float arithmetic so the default integer Fs does not truncate to zero
    freqs = float(Fs) / NFFT * arange(numFreqs)
    Pxx.shape = len(freqs),

    return Pxx, freqs
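# Hypothetical usage sketch for psd (not part of the original module):
# a 10 Hz sine sampled at Fs=400 should concentrate Pxx near 10 Hz.
# Assumes the Numeric-style names used above (arange, sin, pi, argmax)
# are in scope.
def _psd_example():
    Fs = 400.0
    t = arange(0.0, 10.0, 1.0 / Fs)
    Pxx, freqs = psd(sin(2 * pi * 10.0 * t), NFFT=512, Fs=Fs, noverlap=256)
    print freqs[argmax(Pxx)]  # approximately 10.0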
def _draw_steps(self, renderer, gc, xt, yt):
    siz = len(xt)
    if siz < 2:
        return
    # double up the points so the line is drawn as horizontal steps:
    # x becomes x0 x1 x1 x2 x2 ... xn xn, y becomes y0 y0 y1 y1 ... yn yn
    xt2 = ones((2 * siz,), typecode(xt))
    xt2[0:-1:2], xt2[1:-1:2], xt2[-1] = xt, xt[1:], xt[-1]
    yt2 = ones((2 * siz,), typecode(yt))
    yt2[0:-1:2], yt2[1::2] = yt, yt
    gc.set_linestyle('solid')
    if self._newstyle:
        renderer.draw_lines(gc, xt2, yt2, self._transform)
    else:
        renderer.draw_lines(gc, xt2, yt2)
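# Minimal sketch of the step-doubling above (hypothetical driver, not
# part of the class; assumes Numeric-style array/ones/typecode in scope):
# horizontal runs at each y value with vertical risers at each x value.
def _steps_interleave_example():
    xt, yt = array([0, 1, 2]), array([5, 6, 7])
    siz = len(xt)
    xt2 = ones((2 * siz,), typecode(xt))
    xt2[0:-1:2], xt2[1:-1:2], xt2[-1] = xt, xt[1:], xt[-1]
    yt2 = ones((2 * siz,), typecode(yt))
    yt2[0:-1:2], yt2[1::2] = yt, yt
    print xt2  # [0 1 1 2 2 2]
    print yt2  # [5 5 6 6 7 7]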
def specgram(x, NFFT=256, Fs=2, detrend=detrend_none,
             window=window_hanning, noverlap=128):
    """
    Compute a spectrogram of data in x.  Data are split into NFFT
    length segments and the PSD of each section is computed.  The
    windowing function window is applied to each segment, and the
    amount of overlap of each segment is specified with noverlap.

    See pdf for more info.

    The returned times are the midpoints of the intervals over which
    the ffts are calculated
    """
    x = asarray(x)
    assert NFFT > noverlap
    if log(NFFT) / log(2) != int(log(NFFT) / log(2)):
        raise ValueError, 'NFFT must be a power of 2'

    # zero pad x up to NFFT if it is shorter than NFFT
    if len(x) < NFFT:
        n = len(x)
        x = resize(x, (NFFT,))
        x[n:] = 0

    # for real x, ignore the negative frequencies
    if typecode(x) == Complex:
        numFreqs = NFFT
    else:
        numFreqs = NFFT // 2 + 1

    windowVals = window(ones((NFFT,), typecode(x)))
    step = NFFT - noverlap
    ind = arange(0, len(x) - NFFT + 1, step)
    n = len(ind)
    Pxx = zeros((numFreqs, n), Float)

    # do the ffts of the slices
    for i in range(n):
        thisX = x[ind[i]:ind[i] + NFFT]
        thisX = windowVals * detrend(thisX)
        fx = absolute(fft(thisX))**2
        # Scale the spectrum by the norm of the window to compensate
        # for windowing loss; see Bendat & Piersol Sec 11.5.2
        Pxx[:, i] = divide(fx[:numFreqs], norm(windowVals)**2)

    # float arithmetic so the default integer Fs does not truncate to zero
    t = 1.0 / Fs * (ind + NFFT / 2)
    freqs = float(Fs) / NFFT * arange(numFreqs)

    return Pxx, freqs, t
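# Hypothetical usage sketch for specgram (not part of the original
# module): shows the shapes of the three outputs.  Assumes the
# Numeric-style arange/sin/pi used above are in scope.
def _specgram_example():
    Fs = 1000.0
    t = arange(0.0, 2.0, 1.0 / Fs)
    x = sin(2 * pi * 100.0 * t)
    Pxx, freqs, times = specgram(x, NFFT=256, Fs=Fs, noverlap=128)
    # Pxx has shape (len(freqs), len(times)); times are segment midpoints
    print Pxx.shape, freqs[-1], times[0]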
def set_array(self, A):
    'Set the image array from numeric/numarray A'
    from numerix import typecode, typecodes
    if typecode(A) in typecodes['Float']:
        self._A = A.astype(nx.Float32)
    else:
        self._A = A.astype(nx.Int16)
def vander(x, N=None):
    """
    X = vander(x,N=None)

    The Vandermonde matrix of vector x.  The i-th column of X is the
    i-th power of x.  N is the maximum power to compute; if N is None
    it defaults to len(x).
    """
    if N is None:
        N = len(x)
    X = ones((len(x), N), typecode(x))
    for i in range(N - 1):
        X[:, i] = x**(N - i - 1)
    return X
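# Hypothetical worked example (not part of the original module): for
# x = [1, 2, 3] the columns are x**2, x**1, and the untouched ones
# column (the loop stops before the last column, which stays all ones).
def _vander_example():
    print vander(array([1.0, 2.0, 3.0]))
    # [[1. 1. 1.]
    #  [4. 2. 1.]
    #  [9. 3. 1.]]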
def set_data(self, A, shape=None):
    """
    Set the image array

    ACCEPTS: numeric/numarray/PIL Image A
    """
    # check if data is PIL Image without importing Image
    if hasattr(A, 'getpixel'):
        X = pil_to_array(A)
    else:
        X = ma.asarray(A)  # assume array
    if (typecode(X) != UInt8
            or len(X.shape) != 3
            or X.shape[2] > 4 or X.shape[2] < 3):
        cm.ScalarMappable.set_array(self, X)
    else:
        self._A = X
    # invalidate the cached image in either case
    self._imcache = None
def longest_contiguous_ones(x):
    """
    return the indices of the longest stretch of contiguous ones in x,
    assuming x is a vector of zeros and ones.
    """
    if len(x) == 0:
        return array([])

    ind = find(x == 0)
    if len(ind) == 0:
        return arange(len(x))
    if len(ind) == len(x):
        return array([])

    # pad with zeros so runs at the edges are bracketed, then locate
    # run starts (diff == 1) and run ends (diff == -1)
    y = zeros((len(x) + 2,), typecode(x))
    y[1:-1] = x
    dif = diff(y)
    up = find(dif == 1)
    dn = find(dif == -1)
    ind = find(dn - up == max(dn - up))
    ind = arange(take(up, ind), take(dn, ind))

    return ind
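# Hypothetical worked example (not part of the original module):
def _longest_ones_example():
    x = array([0, 1, 1, 0, 1, 1, 1, 0])
    print longest_contiguous_ones(x)  # [4 5 6] -- the run of three ones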
def __call__(self, X, alpha=1.0):
    """
    X is either a scalar or an array (of any dimension).
    If scalar, a tuple of rgba values is returned, otherwise
    an array with the new shape = oldshape+(4,).  If the X-values
    are integers, then they are used as indices into the array.
    If they are floating point, then they must be in the
    interval (0.0, 1.0).
    Alpha must be a scalar.
    """
    if not self._isinit:
        self._init()
    alpha = min(alpha, 1.0)  # alpha must be between 0 and 1
    alpha = max(alpha, 0.0)
    self._lut[:-3, -1] = alpha
    mask_bad = None
    if isinstance(X, (int, float)):
        vtype = 'scalar'
        xa = array([X])
    else:
        vtype = 'array'
        xma = ma.asarray(X)
        xa = xma.filled(0)
        mask_bad = ma.getmaskorNone(xma)
    if typecode(xa) in typecodes['Float']:
        xa = where(xa == 1.0, 0.9999999, xa)  # Tweak so 1.0 is in range.
        xa = (xa * self.N).astype(Int)
    mask_under = xa < 0
    mask_over = xa > self.N - 1
    xa = where(mask_under, self._i_under, xa)
    xa = where(mask_over, self._i_over, xa)
    if mask_bad is not None:  # and sometrue(mask_bad):
        xa = where(mask_bad, self._i_bad, xa)
    #print 'types', typecode(self._lut), typecode(xa), xa.shape
    rgba = take(self._lut, xa)
    if vtype == 'scalar':
        rgba = tuple(rgba[0, :])
    #print rgba[0,1:10,:]
    # Now the same for numpy, numeric...
    return rgba
def __call__(self, X, alpha=1.0):
    """
    X is either a scalar or an array (of any dimension).
    If scalar, a tuple of rgba values is returned, otherwise
    an array with the new shape = oldshape+(4,).  If the X-values
    are integers, then they are used as indices into the array.
    If they are floating point, then they must be in the
    interval (0.0, 1.0).
    Alpha must be a scalar.
    """
    if not self._isinit:
        self._init()
    alpha = min(alpha, 1.0)  # alpha must be between 0 and 1
    alpha = max(alpha, 0.0)
    self._lut[:-3, -1] = alpha
    mask_bad = None
    if not iterable(X):
        vtype = 'scalar'
        xa = array([X])
    else:
        vtype = 'array'
        xma = ma.asarray(X)
        xa = xma.filled(0)
        mask_bad = ma.getmask(xma)
    if typecode(xa) in typecodes['Float']:
        putmask(xa, xa == 1.0, 0.9999999)  # Treat 1.0 as slightly less than 1.
        xa = (xa * self.N).astype(Int)
    # Set the over-range indices before the under-range;
    # otherwise the under-range values get converted to over-range.
    putmask(xa, xa > self.N - 1, self._i_over)
    putmask(xa, xa < 0, self._i_under)
    if mask_bad is not None and mask_bad.shape == xa.shape:
        putmask(xa, mask_bad, self._i_bad)
    rgba = take(self._lut, xa)
    if vtype == 'scalar':
        rgba = tuple(rgba[0, :])
    return rgba
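# Hypothetical usage sketch (not part of the original module): calling
# a Colormap instance maps normalized values to RGBA.  Assumes a built
# colormap such as matplotlib's cm.jet is importable in this era.
def _colormap_call_example():
    from matplotlib import cm
    print cm.jet(0.5)                     # one (r, g, b, a) tuple
    print cm.jet(arange(0.0, 1.0, 0.25))  # shape (4, 4) RGBA array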
def zeros_like(a):
    """Return an array of zeros of the shape and typecode of a."""
    return zeros(a.shape, typecode(a))
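# Hypothetical worked example (not part of the original module):
def _zeros_like_example():
    a = array([[1.5, 2.5], [3.5, 4.5]])
    print zeros_like(a)  # 2x2 zeros with the Float typecode of a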
def csd(x, y, NFFT=256, Fs=2, detrend=detrend_none,
        window=window_hanning, noverlap=0):
    """
    The cross spectral density Pxy by Welch's average periodogram
    method.  The vectors x and y are divided into NFFT length
    segments.  Each segment is detrended by function detrend and
    windowed by function window.  noverlap gives the length of the
    overlap between segments.  The product of the direct FFTs of x and
    y are averaged over each segment to compute Pxy, with a scaling to
    correct for power loss due to windowing.  Fs is the sampling
    frequency.

    NFFT must be a power of 2

    Returns the tuple Pxy, freqs

    Refs: Bendat & Piersol -- Random Data: Analysis and Measurement
      Procedures, John Wiley & Sons (1986)
    """
    # require a true power of 2 (NFFT % 2 would only reject odd values)
    if log(NFFT) / log(2) != int(log(NFFT) / log(2)):
        raise ValueError, 'NFFT must be a power of 2'

    # zero pad x and y up to NFFT if they are shorter than NFFT
    if len(x) < NFFT:
        n = len(x)
        x = resize(x, (NFFT,))
        x[n:] = 0
    if len(y) < NFFT:
        n = len(y)
        y = resize(y, (NFFT,))
        y[n:] = 0

    # for real x, ignore the negative frequencies
    if typecode(x) == Complex:
        numFreqs = NFFT
    else:
        numFreqs = NFFT // 2 + 1

    windowVals = window(ones((NFFT,), typecode(x)))
    step = NFFT - noverlap
    ind = range(0, len(x) - NFFT + 1, step)
    n = len(ind)
    Pxy = zeros((numFreqs, n), Complex)

    # do the ffts of the slices
    for i in range(n):
        thisX = x[ind[i]:ind[i] + NFFT]
        thisX = windowVals * detrend(thisX)
        thisY = y[ind[i]:ind[i] + NFFT]
        thisY = windowVals * detrend(thisY)
        fx = fft(thisX)
        fy = fft(thisY)
        Pxy[:, i] = conjugate(fx[:numFreqs]) * fy[:numFreqs]

    # Scale the spectrum by the norm of the window to compensate for
    # windowing loss; see Bendat & Piersol Sec 11.5.2
    if n > 1:
        Pxy = mean(Pxy, 1)
    Pxy = divide(Pxy, norm(windowVals)**2)

    freqs = float(Fs) / NFFT * arange(numFreqs)
    Pxy.shape = len(freqs),

    return Pxy, freqs
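# Hypothetical usage sketch (not part of the original module): the csd
# of a signal with itself reduces to its psd, since conj(fx)*fx equals
# absolute(fx)**2.  Assumes arange/sin/pi/absolute are in scope.
def _csd_example():
    Fs = 400.0
    t = arange(0.0, 10.0, 1.0 / Fs)
    x = sin(2 * pi * 10.0 * t)
    Pxy, freqs = csd(x, x, NFFT=512, Fs=Fs, noverlap=256)
    Pxx, freqs2 = psd(x, NFFT=512, Fs=Fs, noverlap=256)
    print max(absolute(Pxy - Pxx))  # ~0: Pxy == Pxx when y is x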
def cohere_pairs(X, ij, NFFT=256, Fs=2, detrend=detrend_none,
                 window=window_hanning, noverlap=0,
                 preferSpeedOverMemory=True,
                 progressCallback=donothing_callback,
                 returnPxx=False):
    """
    Cxy, Phase, freqs = cohere_pairs(X, ij, ...)

    Compute the coherence for all pairs in ij.  X is a
    numSamples,numCols Numeric array.  ij is a list of tuples (i,j).
    Each tuple is a pair of indexes into the columns of X for which
    you want to compute coherence.  For example, if X has 64 columns,
    and you want to compute all nonredundant pairs, define ij as

      ij = []
      for i in range(64):
          for j in range(i+1, 64):
              ij.append((i, j))

    The other function arguments, except for 'preferSpeedOverMemory'
    (see below), are explained in the help string of 'psd'.

    Return value is a tuple (Cxy, Phase, freqs).

      Cxy -- a dictionary of (i,j) tuples -> coherence vector for that
        pair.  Ie, Cxy[(i,j)] = cohere(X[:,i], X[:,j]).  Number of
        dictionary keys is len(ij)

      Phase -- a dictionary of phases of the cross spectral density at
        each frequency for each pair.  keys are (i,j).

      freqs -- a vector of frequencies, equal in length to either the
        coherence or phase vectors for any i,j key.

    Eg, to make a coherence Bode plot:

      subplot(211)
      plot(freqs, Cxy[(12,19)])
      subplot(212)
      plot(freqs, Phase[(12,19)])

    For a large number of pairs, cohere_pairs can be much more
    efficient than just calling cohere for each pair, because it
    caches most of the intensive computations.  If N is the number of
    pairs, this function is O(N) for most of the heavy lifting,
    whereas calling cohere for each pair is O(N^2).  However, because
    of the caching, it is also more memory intensive, making 2
    additional complex arrays with approximately the same number of
    elements as X.

    The parameter 'preferSpeedOverMemory', if false, limits the
    caching by only making one, rather than two, complex cache arrays.
    This is useful if memory becomes critical.  Even when
    preferSpeedOverMemory is false, cohere_pairs will still give
    significant performance gains over calling cohere for each pair,
    and will use substantially less memory than if
    preferSpeedOverMemory is true.  In my tests with a 43000,64 array
    over all nonredundant pairs, preferSpeedOverMemory=1 delivered a
    33% performance boost on a 1.7GHz Athlon with 512MB RAM compared
    with preferSpeedOverMemory=0.  But both solutions were more than
    10x faster than naively crunching all possible pairs through
    cohere.

    See test/cohere_pairs_test.py in the src tree for an example
    script that shows that this cohere_pairs and cohere give the same
    results for a given pair.
    """
    numRows, numCols = X.shape

    # zero pad if X is too short
    if numRows < NFFT:
        tmp = X
        X = zeros((NFFT, numCols), typecode(X))
        X[:numRows, :] = tmp
        del tmp
    numRows, numCols = X.shape

    # get all the columns of X that we are interested in by checking
    # the ij tuples
    seen = {}
    for i, j in ij:
        seen[i] = 1
        seen[j] = 1
    allColumns = seen.keys()
    Ncols = len(allColumns)
    del seen

    # for real X, ignore the negative frequencies
    if typecode(X) == Complex:
        numFreqs = NFFT
    else:
        numFreqs = NFFT // 2 + 1

    # cache the FFT of every windowed, detrended NFFT length segment
    # of every channel.  If preferSpeedOverMemory, cache the conjugate
    # as well
    windowVals = window(ones((NFFT,), typecode(X)))
    ind = range(0, numRows - NFFT + 1, NFFT - noverlap)
    numSlices = len(ind)
    FFTSlices = {}
    FFTConjSlices = {}
    Pxx = {}
    slices = range(numSlices)
    normVal = norm(windowVals)**2
    for iColNum, iCol in enumerate(allColumns):
        # use the loop counter (not the stale i left over from the ij
        # loop above) and float division so the fraction is nonzero
        progressCallback(float(iColNum) / Ncols, 'Caching FFTs')
        Slices = zeros((numSlices, numFreqs), Complex)
        for iSlice in slices:
            thisSlice = X[ind[iSlice]:ind[iSlice] + NFFT, iCol]
            thisSlice = windowVals * detrend(thisSlice)
            Slices[iSlice, :] = fft(thisSlice)[:numFreqs]
        FFTSlices[iCol] = Slices
        if preferSpeedOverMemory:
            FFTConjSlices[iCol] = conjugate(Slices)
        Pxx[iCol] = divide(mean(absolute(Slices)**2), normVal)
    del Slices, ind, windowVals

    # compute the coherences and phases for all pairs using the
    # cached FFTs
    Cxy = {}
    Phase = {}
    count = 0
    N = len(ij)
    for i, j in ij:
        count += 1
        if count % 10 == 0:
            progressCallback(float(count) / N, 'Computing coherences')
        if preferSpeedOverMemory:
            Pxy = FFTSlices[i] * FFTConjSlices[j]
        else:
            Pxy = FFTSlices[i] * conjugate(FFTSlices[j])
        if numSlices > 1:
            Pxy = mean(Pxy)
        Pxy = divide(Pxy, normVal)
        Cxy[(i, j)] = divide(absolute(Pxy)**2, Pxx[i] * Pxx[j])
        Phase[(i, j)] = arctan2(Pxy.imag, Pxy.real)

    freqs = float(Fs) / NFFT * arange(numFreqs)
    if returnPxx:
        return Cxy, Phase, freqs, Pxx
    else:
        return Cxy, Phase, freqs
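# Hypothetical usage sketch (not part of the original module): coherence
# of every nonredundant column pair of a (numSamples, 3) array.  The
# `rand` noise source is illustrative only; assumes such a helper exists
# alongside the Numeric-style names used above.
def _cohere_pairs_example():
    numSamples, numCols = 4096, 3
    X = rand(numSamples, numCols)  # hypothetical noise source
    ij = []
    for i in range(numCols):
        for j in range(i + 1, numCols):
            ij.append((i, j))
    Cxy, Phase, freqs = cohere_pairs(X, ij, NFFT=256, Fs=2)
    print len(Cxy), len(freqs)  # one coherence vector per pair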
def make_image(self):
    if self._A is not None:
        if self._imcache is None:
            if typecode(self._A) == UInt8:
                im = _image.frombyte(self._A, 0)
            else:
                x = self.to_rgba(self._A, self._alpha)
                im = _image.fromarray(x, 0)
            self._imcache = im
        else:
            im = self._imcache
    else:
        raise RuntimeError(
            'You must first set the image array or the image attribute')

    bg = colorConverter.to_rgba(self.axes.get_frame().get_facecolor(), 0)

    if self.origin == 'upper':
        im.flipud_in()

    im.set_bg(*bg)
    im.is_grayscale = (self.cmap.name == "gray" and
                       len(self._A.shape) == 2)
    im.set_aspect(self._aspectd[self._aspect])
    im.set_interpolation(self._interpd[self._interpolation])

    # image input dimensions
    numrows, numcols = im.get_size()
    im.reset_matrix()

    xmin, xmax, ymin, ymax = self.get_extent()
    dxintv = xmax - xmin
    dyintv = ymax - ymin

    # the viewport scale factor
    sx = dxintv / self.axes.viewLim.width()
    sy = dyintv / self.axes.viewLim.height()

    if im.get_interpolation() != _image.NEAREST:
        im.apply_translation(-1, -1)

    # the viewport translation
    tx = (xmin - self.axes.viewLim.xmin()) / dxintv * numcols
    #if flipy:
    #    ty = -(ymax - self.axes.viewLim.ymax()) / dyintv * numrows
    #else:
    #    ty = (ymin - self.axes.viewLim.ymin()) / dyintv * numrows
    ty = (ymin - self.axes.viewLim.ymin()) / dyintv * numrows

    l, b, widthDisplay, heightDisplay = self.axes.bbox.get_bounds()
    im.apply_translation(tx, ty)
    im.apply_scaling(sx, sy)

    # resize viewport to display
    rx = widthDisplay / numcols
    ry = heightDisplay / numrows
    if im.get_aspect() == _image.ASPECT_PRESERVE:
        if ry < rx:
            rx = ry
        # todo: center the image in viewport
        im.apply_scaling(rx, rx)
    else:
        im.apply_scaling(rx, ry)

    #print tx, ty, sx, sy, rx, ry, widthDisplay, heightDisplay
    im.resize(int(widthDisplay + 0.5), int(heightDisplay + 0.5),
              norm=self._filternorm, radius=self._filterrad)

    if self.origin == 'upper':
        im.flipud_in()

    return im