def va(self, mode='p'):
    t, B, E = self.get('t'), self.get('B' + mode), self.get('E' + mode)
    # Auto-spectral densities via csd(x, x); nperseg/noverlap must be integers,
    # so use floor division.
    bsd = signal.csd(B, B, fs=1/(t[1] - t[0]), nperseg=t.size//2,
                     noverlap=t.size//2 - 1)[1]
    esd = signal.csd(E, E, fs=1/(t[1] - t[0]), nperseg=t.size//2,
                     noverlap=t.size//2 - 1)[1]
    return np.sqrt(np.abs(esd/bsd))
def calc_cospectrum(a, b, **kwargs):
    nfft_time = np.shape(a)[0]
    if 'nfft_time' in kwargs:
        nfft_time = kwargs['nfft_time']
    nlon = np.shape(a)[1]
    fa = fft.fft(a, axis=1)
    fb = fft.fft(b, axis=1)
    # Integer division: these are used as array sizes and slice bounds.
    nomega = nfft_time//2 + 1
    nk = nlon//2 + 1
    cfa = np.real(fa[:, :nk])
    sfa = np.imag(fa[:, :nk])
    cfb = np.real(fb[:, :nk])
    sfb = np.imag(fb[:, :nk])
    pp = np.zeros([nomega, nk])
    pn = np.zeros([nomega, nk])
    for i in range(nk):
        omega, pcacb = signal.csd(cfa[:, i], cfb[:, i], nperseg=nfft_time)
        omega, psasb = signal.csd(sfa[:, i], sfb[:, i], nperseg=nfft_time)
        omega, pcasb = signal.csd(cfa[:, i], sfb[:, i], nperseg=nfft_time)
        omega, psacb = signal.csd(sfa[:, i], cfb[:, i], nperseg=nfft_time)
        pp[:, i] = np.real(pcacb) + np.real(psasb) + np.imag(pcasb) - np.imag(psacb)
        pn[:, i] = np.real(pcacb) + np.real(psasb) - np.imag(pcasb) + np.imag(psacb)
    p_all = np.zeros([nomega*2, nk])
    p_all[:nomega, :] = np.flipud(pn)
    p_all[nomega:, :] = pp
    # Smooth along the frequency axis with a normalized Gaussian kernel.
    sigma = 0.25/np.pi*nomega
    x = np.linspace(-nomega/2, nomega/2, nomega)
    gauss = np.exp(-x**2/(2*sigma**2))
    gauss = gauss/np.sum(gauss)
    for i in range(nk):
        p_all[:, i] = np.convolve(p_all[:, i], gauss, mode='same')
    omega_all = np.concatenate((np.flipud(-omega), omega))
    return p_all, omega_all
def test_nd_axis_m1(self):
    x = np.arange(20, dtype=np.float64) + 0.04
    x = x.reshape((2, 1, 10))
    f, p = csd(x, x, nperseg=10)
    assert_array_equal(p.shape, (2, 1, 6))
    assert_allclose(p[0, 0, :], p[1, 0, :], atol=1e-13, rtol=1e-13)
    f0, p0 = csd(x[0, 0, :], x[0, 0, :], nperseg=10)
    assert_allclose(p0[np.newaxis, :], p[1, :], atol=1e-13, rtol=1e-13)
def comp(self, mode='p'):
    t = self.get('t')
    Bx, Bz, Ey = self.get('Bx'), self.get('Bz'), self.get('Ey')
    # nperseg/noverlap must be integers, so use floor division.
    csdx = signal.csd(Bx, Ey, fs=1/(t[1] - t[0]), nperseg=t.size//2,
                      noverlap=t.size//2 - 1)[1]
    csdz = signal.csd(Bz, Ey, fs=1/(t[1] - t[0]), nperseg=t.size//2,
                      noverlap=t.size//2 - 1)[1]
    return np.abs(csdz/csdx)
def test_nd_axis_0(self):
    x = np.arange(20, dtype=np.float64) + 0.04
    x = x.reshape((10, 2, 1))
    f, p = csd(x, x, nperseg=10, axis=0)
    assert_array_equal(p.shape, (6, 2, 1))
    assert_allclose(p[:, 0, 0], p[:, 1, 0], atol=1e-13, rtol=1e-13)
    f0, p0 = csd(x[:, 0, 0], x[:, 0, 0], nperseg=10)
    assert_allclose(p0, p[:, 1, 0], atol=1e-13, rtol=1e-13)
def test_window_external(self):
    x = np.zeros(16)
    x[0] = 1
    x[8] = 1
    f, p = csd(x, x, 10, 'hann', 8)
    win = signal.get_window('hann', 8)
    fe, pe = csd(x, x, 10, win, 8)
    assert_array_almost_equal_nulp(p, pe)
    assert_array_almost_equal_nulp(f, fe)
def test_short_data(self):
    x = np.zeros(8)
    x[0] = 1
    with warnings.catch_warnings():
        warnings.simplefilter('ignore', UserWarning)
        f, p = csd(x, x)
        f1, p1 = csd(x, x, nperseg=8)
    assert_allclose(f, f1)
    assert_allclose(p, p1)
def test_empty_input_other_axis(self):
    for shape in [(3, 0), (0, 5, 2)]:
        f, p = csd(np.empty(shape), np.empty(shape), axis=1)
        assert_array_equal(f.shape, shape)
        assert_array_equal(p.shape, shape)

    f, p = csd(np.empty((10, 10, 3)), np.zeros((10, 0, 1)), axis=1)
    assert_array_equal(f.shape, (10, 0, 3))
    assert_array_equal(p.shape, (10, 0, 3))

    f, p = csd(np.empty((10, 0, 1)), np.zeros((10, 10, 3)), axis=1)
    assert_array_equal(f.shape, (10, 0, 3))
    assert_array_equal(p.shape, (10, 0, 3))
def test_short_data(self):
    x = np.zeros(8)
    x[0] = 1

    # for a string-like window, an input signal shorter than nperseg gives a
    # UserWarning and sets nperseg to x.shape[-1]
    with suppress_warnings() as sup:
        sup.filter(UserWarning, "nperseg = 256 is greater than input length = 8, using nperseg = 8")
        f, p = csd(x, x, window='hann')  # default nperseg
        f1, p1 = csd(x, x, window='hann', nperseg=256)  # user-specified nperseg
    f2, p2 = csd(x, x, nperseg=8)  # valid nperseg, doesn't give warning
    assert_allclose(f, f2)
    assert_allclose(p, p2)
    assert_allclose(f1, f2)
    assert_allclose(p1, p2)
def calc_csd(self):
    """
    Calculate the cross spectral density using the Scipy csd function.

    csd utilizes Welch's method to estimate spectral density. Data is split
    into overlapping segments. Each segment is windowed, then the cross
    spectral density is calculated using Fourier transforms. The results from
    all windows are averaged together to produce a lower-variance estimate of
    the spectral density.

    A segment overlap factor of 2 is used (50% overlap). A one-sided spectrum
    is returned for real inputs.

    The cross spectral density (units V**2/Hz) is calculated, as opposed to
    the cross spectrum (units V**2).
    """
    # Calculate the sampling rate. Signal1 and signal2 must have the same
    # sampling rate. (Integer slice index; 1e4 would raise a TypeError.)
    fs = 1 / np.mean(np.diff(self.signal1time[:10000]))
    # If the number of points per segment is not specified, calculate the
    # number that gives approximately equal time and frequency resolution.
    if self.nperseg is None:
        self.nperseg = int(np.sqrt(2*len(self.signal1)))
    # Use the next power of 2 for nperseg if specified. The FFT algorithm is
    # most efficient when nperseg is a power of 2.
    if self.forcepower2 is True:
        self.nperseg = np.power(2, int(np.log2(self.nperseg - 1)) + 1)
    # Calculate cross spectral density between signals 1 and 2
    self.freqs, self.csd = signal.csd(self.signal1, self.signal2, fs=fs,
                                      window=self.window,
                                      nperseg=self.nperseg,
                                      detrend=self.detrend)
    # Calculate auto spectral density of signal 1
    _, self.asd1 = signal.csd(self.signal1, self.signal1, fs=fs,
                              window=self.window, nperseg=self.nperseg,
                              detrend=self.detrend)
    # Calculate auto spectral density of signal 2
    _, self.asd2 = signal.csd(self.signal2, self.signal2, fs=fs,
                              window=self.window, nperseg=self.nperseg,
                              detrend=self.detrend)
    # Convert frequency units from Hz to kHz
    self.freqs /= 1000
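# A minimal standalone sketch (hypothetical signal; fs and nperseg here are
# assumptions, not values from calc_csd above) illustrating the docstring's
# point: csd(x, x) reduces to the Welch auto-spectral density estimate, which
# is exactly how asd1 and asd2 are obtained above.
import numpy as np
from scipy import signal

fs = 1000.0                                   # assumed sampling rate, Hz
t = np.arange(0, 2.0, 1 / fs)
x = np.sin(2 * np.pi * 50 * t) + 0.1 * np.random.randn(t.size)

f_w, pxx_welch = signal.welch(x, fs=fs, nperseg=256)  # 50% overlap by default
f_c, pxx_csd = signal.csd(x, x, fs=fs, nperseg=256)   # same segments/window

assert np.allclose(pxx_welch, pxx_csd.real)  # identical estimates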
def process_pal5_densdata(options):
    # Read and prep data
    backg = 400.0
    data = numpy.loadtxt("data/ibata_fig7b_raw.dat", delimiter=",")
    sindx = numpy.argsort(data[:, 0])
    data = data[sindx]
    data_lowerr = numpy.loadtxt("data/ibata_fig7b_rawlowerr.dat", delimiter=",")
    sindx = numpy.argsort(data_lowerr[:, 0])
    data_lowerr = data_lowerr[sindx]
    data_uperr = numpy.loadtxt("data/ibata_fig7b_rawuperr.dat", delimiter=",")
    sindx = numpy.argsort(data_uperr[:, 0])
    data_uperr = data_uperr[sindx]
    data_err = 0.5 * (data_uperr - data_lowerr)
    # CUTS
    indx = (data[:, 0] > options.minxi - 0.05) * (data[:, 0] < options.maxxi)
    data = data[indx]
    data_lowerr = data_lowerr[indx]
    data_uperr = data_uperr[indx]
    data_err = data_err[indx]
    # Compute power spectrum
    tdata = data[:, 1] - backg
    pp = Polynomial.fit(data[:, 0], tdata, deg=options.polydeg,
                        w=1.0 / data_err[:, 1])
    tdata /= pp(data[:, 0])
    ll = data[:, 0]
    py = signal.csd(tdata, tdata, fs=1.0 / (ll[1] - ll[0]),
                    scaling="spectrum", nperseg=len(ll))[1]
    py = py.real
    # Also compute the bispectrum
    Bspec, Bpx = bispectrum.bispectrum(numpy.vstack((tdata, tdata)).T,
                                       nfft=len(tdata), wind=7, nsamp=1,
                                       overlap=0)
    ppyr = numpy.fabs(Bspec[len(Bspec) // 2 + _BISPECIND,
                            len(Bspec) // 2:].real)
    ppyi = numpy.fabs(Bspec[len(Bspec) // 2 + _BISPECIND,
                            len(Bspec) // 2:].imag)
    return (numpy.sqrt(py * (ll[-1] - ll[0])),
            data_err[:, 1] / pp(data[:, 0]),
            ppyr, ppyi)
def process_mock_densdata(options):
    print("Using mock Pal 5 data from %s" % options.mockfilename)
    # Read and prep data for mocks
    xvid = numpy.loadtxt(options.mockfilename)
    xv = xvid[:, :6]
    xv = xv[numpy.argsort(xvid[:, 6])]
    XYZ = bovy_coords.galcenrect_to_XYZ(xv[:, 0], xv[:, 1], xv[:, 2],
                                        Xsun=R0, Zsun=0.025)
    lbd = bovy_coords.XYZ_to_lbd(XYZ[0], XYZ[1], XYZ[2], degree=True)
    radec = bovy_coords.lb_to_radec(lbd[:, 0], lbd[:, 1], degree=True)
    xieta = pal5_util.radec_to_pal5xieta(radec[:, 0], radec[:, 1], degree=True)
    # make sure the progenitor is at (0,0)
    xieta[:, 0] -= numpy.median(xieta[:, 0])
    xieta[:, 1] -= numpy.median(xieta[:, 1])
    h, e = numpy.histogram(xieta[:, 0], range=[0.2, 14.3], bins=141)
    xdata = numpy.arange(0.25, 14.35, 0.1)
    # Compute power spectrum
    tdata = h - 0.0
    pp = Polynomial.fit(xdata, tdata, deg=options.polydeg,
                        w=1.0 / numpy.sqrt(h + 1.0))
    tdata /= pp(xdata)
    ll = xdata
    py = signal.csd(tdata, tdata, fs=1.0 / (ll[1] - ll[0]),
                    scaling="spectrum", nperseg=len(ll))[1]
    py = py.real
    # Also compute the bispectrum
    Bspec, Bpx = bispectrum.bispectrum(numpy.vstack((tdata, tdata)).T,
                                       nfft=len(tdata), wind=7, nsamp=1,
                                       overlap=0)
    ppyr = numpy.fabs(Bspec[len(Bspec) // 2 + _BISPECIND,
                            len(Bspec) // 2:].real)
    ppyi = numpy.fabs(Bspec[len(Bspec) // 2 + _BISPECIND,
                            len(Bspec) // 2:].imag)
    return (numpy.sqrt(py * (ll[-1] - ll[0])),
            numpy.sqrt(h + 1.0) / pp(xdata),
            ppyr, ppyi)
def test_nondefault_noverlap(self):
    x = np.zeros(64)
    x[::8] = 1
    f, p = csd(x, x, nperseg=16, noverlap=4)
    q = np.array([0, 1./12., 1./3., 1./5., 1./3., 1./5., 1./3., 1./5.,
                  1./6.])
    assert_allclose(p, q, atol=1e-12)
def test_detrend_external_nd_0(self):
    x = np.arange(20, dtype=np.float64) + 0.04
    x = x.reshape((2, 1, 10))
    x = np.rollaxis(x, 2, 0)
    f, p = csd(x, x, nperseg=10, axis=0,
               detrend=lambda seg: signal.detrend(seg, axis=0, type='l'))
    assert_allclose(p, np.zeros_like(p), atol=1e-15)
def test_short_data(self):
    x = np.zeros(8)
    x[0] = 1

    # for a string-like window, an input signal shorter than nperseg gives a
    # UserWarning and sets nperseg to x.shape[-1]
    with warnings.catch_warnings():
        warnings.simplefilter('ignore', UserWarning)
        f, p = csd(x, x, window='hann')  # default nperseg
        f1, p1 = csd(x, x, window='hann', nperseg=256)  # user-specified nperseg
    f2, p2 = csd(x, x, nperseg=8)  # valid nperseg, doesn't give warning
    assert_allclose(f, f2)
    assert_allclose(p, p2)
    assert_allclose(f1, f2)
    assert_allclose(p1, p2)
def get_shift(regenerate=False):
    """Possibly needs work on robust linear fit, but close enough

    Shift given as number of samples"""
    if regenerate:
        plt.figure()
        for cast_num in np.r_[92:123]:
            C, sigma, theta, p, S, T = read_cast(cast_num)
            # If uncommented, the line below shifts temperature, and so the
            # output graph should have data along a horizontal line
            # S, T, theta = shift_T(C, theta, T, p)
            f, csd_f = csd(C, theta, fs=24, nperseg=256)
            plt.plot(f, np.angle(csd_f), color='k', alpha=0.7, marker='+',
                     ls='none')
        # For checking shift
        fs, angles = get_xy_line_data(plt.gca(), sort_x=True)
        wts = (12.5 - fs)/12.5
        p = np.polyfit(fs, angles, 1, w=wts)
        shift = p[0]
        plt.plot(fs, angles, 'k+')
        plt.plot(np.r_[0, 12.5], np.polyval(p, np.r_[0, 12.5]))
        print('To find shift, get slope of best fit line, then ×24/2pi')
        print('Linear fits do not work well enough here. Need an iterative '
              'procedure')
        return shift*24/(2*np.pi)
    else:
        return -0.42
def test_window_external(self):
    x = np.zeros(16)
    x[0] = 1
    x[8] = 1
    f, p = csd(x, x, 10, 'hann', 8)
    win = signal.get_window('hann', 8)
    fe, pe = csd(x, x, 10, win, nperseg=None)
    assert_array_almost_equal_nulp(p, pe)
    assert_array_almost_equal_nulp(f, fe)
    assert_array_equal(fe.shape, (5,))  # because win length used as nperseg
    assert_array_equal(pe.shape, (5,))
    assert_raises(ValueError, csd, x, x, 10, win, nperseg=256)  # because nperseg != win.shape[-1]
    win_err = signal.get_window('hann', 32)
    assert_raises(ValueError, csd, x, x, 10, win_err, nperseg=None)  # because win longer than signal
def test_padded_freqs(self):
    x = np.zeros(12)
    y = np.ones(12)

    nfft = 24
    f = fftpack.fftfreq(nfft, 1.0)[:nfft//2+1]
    f[-1] *= -1
    fodd, _ = csd(x, y, nperseg=5, nfft=nfft)
    feven, _ = csd(x, y, nperseg=6, nfft=nfft)
    assert_allclose(f, fodd)
    assert_allclose(f, feven)

    nfft = 25
    f = fftpack.fftfreq(nfft, 1.0)[:(nfft + 1)//2]
    fodd, _ = csd(x, y, nperseg=5, nfft=nfft)
    feven, _ = csd(x, y, nperseg=6, nfft=nfft)
    assert_allclose(f, fodd)
    assert_allclose(f, feven)
def test_integer_onesided_even(self):
    x = np.zeros(16, dtype=int)
    x[0] = 1
    x[8] = 1
    f, p = csd(x, x, nperseg=8)
    assert_allclose(f, np.linspace(0, 0.5, 5))
    q = np.array([0.08333333, 0.15277778, 0.22222222, 0.22222222,
                  0.11111111])
    assert_allclose(p, q, atol=1e-7, rtol=1e-7)
def test_integer_onesided_odd(self):
    x = np.zeros(16, dtype=int)
    x[0] = 1
    x[8] = 1
    f, p = csd(x, x, nperseg=9)
    assert_allclose(f, np.arange(5.0)/9.0)
    q = np.array([0.12477455, 0.23430933, 0.17072113, 0.17072113,
                  0.17072113])
    assert_allclose(p, q, atol=1e-7, rtol=1e-7)
def test_integer_twosided(self):
    x = np.zeros(16, dtype=int)
    x[0] = 1
    x[8] = 1
    f, p = csd(x, x, nperseg=8, return_onesided=False)
    assert_allclose(f, fftpack.fftfreq(8, 1.0))
    q = np.array([0.08333333, 0.07638889, 0.11111111, 0.11111111,
                  0.11111111, 0.11111111, 0.11111111, 0.07638889])
    assert_allclose(p, q, atol=1e-7, rtol=1e-7)
def test_complex(self):
    x = np.zeros(16, np.complex128)
    x[0] = 1.0 + 2.0j
    x[8] = 1.0 + 2.0j
    f, p = csd(x, x, nperseg=8, return_onesided=False)
    assert_allclose(f, fftpack.fftfreq(8, 1.0))
    q = np.array([0.41666667, 0.38194444, 0.55555556, 0.55555556,
                  0.55555556, 0.55555556, 0.55555556, 0.38194444])
    assert_allclose(p, q, atol=1e-7, rtol=1e-7)
def test_real_spectrum(self):
    x = np.zeros(16)
    x[0] = 1
    x[8] = 1
    f, p = csd(x, x, nperseg=8, scaling='spectrum')
    assert_allclose(f, np.linspace(0, 0.5, 5))
    q = np.array([0.015625, 0.02864583, 0.04166667, 0.04166667,
                  0.02083333])
    assert_allclose(p, q, atol=1e-7, rtol=1e-7)
def test_integer_onesided_odd(self):
    x = np.zeros(16, dtype=int)
    x[0] = 1
    x[8] = 1
    f, p = csd(x, x, nperseg=9)
    assert_allclose(f, np.arange(5.0)/9.0)
    q = np.array([0.15958227, 0.24193957, 0.24145224, 0.24100919,
                  0.24377353])
    assert_allclose(p, q, atol=1e-7, rtol=1e-7)
def test_real_onesided_even_32(self):
    x = np.zeros(16, 'f')
    x[0] = 1
    x[8] = 1
    f, p = csd(x, x, nperseg=8)
    assert_allclose(f, np.linspace(0, 0.5, 5))
    q = np.array([0.08333333, 0.15277778, 0.22222222, 0.22222222,
                  0.11111111], 'f')
    assert_allclose(p, q, atol=1e-7, rtol=1e-7)
    assert_(p.dtype == q.dtype)
def test_real_onesided_odd_32(self):
    x = np.zeros(16, 'f')
    x[0] = 1
    x[8] = 1
    f, p = csd(x, x, nperseg=9)
    assert_allclose(f, np.arange(5.0)/9.0)
    q = np.array([0.12477458, 0.23430935, 0.17072113, 0.17072116,
                  0.17072113], 'f')
    assert_allclose(p, q, atol=1e-7, rtol=1e-7)
    assert_(p.dtype == q.dtype)
def test_pad_shorter_y(self):
    x = np.zeros(12)
    y = np.zeros(8)

    f = np.linspace(0, 0.5, 7)
    c = np.zeros(7, dtype=np.complex128)
    f1, c1 = csd(x, y, nperseg=12)

    assert_allclose(f, f1)
    assert_allclose(c, c1)
def test_real_onesided_odd_32(self):
    x = np.zeros(16, 'f')
    x[0] = 1
    x[8] = 1
    f, p = csd(x, x, nperseg=9)
    assert_allclose(f, np.arange(5.0)/9.0)
    q = np.array([0.15958227, 0.24193957, 0.24145224, 0.24100919,
                  0.24377353], 'f')
    assert_allclose(p, q, atol=1e-7, rtol=1e-7)
    assert_(p.dtype == q.dtype)
def test_real_twosided_32(self):
    x = np.zeros(16, 'f')
    x[0] = 1
    x[8] = 1
    f, p = csd(x, x, nperseg=8, return_onesided=False)
    assert_allclose(f, fftpack.fftfreq(8, 1.0))
    q = np.array([0.08333333, 0.07638889, 0.11111111, 0.11111111,
                  0.11111111, 0.11111111, 0.11111111, 0.07638889], 'f')
    assert_allclose(p, q, atol=1e-7, rtol=1e-7)
    assert_(p.dtype == q.dtype)
def test_complex_32(self):
    x = np.zeros(16, 'F')
    x[0] = 1.0 + 2.0j
    x[8] = 1.0 + 2.0j
    f, p = csd(x, x, nperseg=8, return_onesided=False)
    assert_allclose(f, fftpack.fftfreq(8, 1.0))
    q = np.array([0.41666666, 0.38194442, 0.55555552, 0.55555552,
                  0.55555558, 0.55555552, 0.55555552, 0.38194442], 'f')
    assert_allclose(p, q, atol=1e-7, rtol=1e-7)
    assert_(p.dtype == q.dtype,
            'dtype mismatch, %s, %s' % (p.dtype, q.dtype))
def compute_directional_spectrum(self):
    """Calculate directional spectrum moments for direction and spread"""
    # suppress divide-by-zero warnings
    np.seterr(divide='ignore', invalid='ignore')
    f11, p11 = signal.csd(self.elev_proc, self.elev_proc, fs=self.fs,
                          nperseg=IMU_nperseg, noverlap=IMU_noverlap,
                          nfft=IMU_nfft, return_onesided=True)
    _, p22 = signal.csd(self.pitch_proc, self.pitch_proc, fs=self.fs,
                        nperseg=IMU_nperseg, noverlap=IMU_noverlap,
                        nfft=IMU_nfft, return_onesided=True)
    _, p33 = signal.csd(self.roll_proc, self.roll_proc, fs=self.fs,
                        nperseg=IMU_nperseg, noverlap=IMU_noverlap,
                        nfft=IMU_nfft, return_onesided=True)
    _, p12 = signal.csd(self.elev_proc, self.pitch_proc, fs=self.fs,
                        nperseg=IMU_nperseg, noverlap=IMU_noverlap,
                        nfft=IMU_nfft, return_onesided=True)
    _, p13 = signal.csd(self.elev_proc, self.roll_proc, fs=self.fs,
                        nperseg=IMU_nperseg, noverlap=IMU_noverlap,
                        nfft=IMU_nfft, return_onesided=True)
    _, p23 = signal.csd(self.pitch_proc, self.roll_proc, fs=self.fs,
                        nperseg=IMU_nperseg, noverlap=IMU_noverlap,
                        nfft=IMU_nfft, return_onesided=True)
    # calculate omega, k, and k0
    omega = 2 * np.pi * f11
    g = 9.81
    k0 = omega**2 / g
    self.k = np.sqrt((p22 + p33) / p11)
    self.R = self.k / k0
    # now calculate circular moments
    self.freq = f11
    self.a0 = p11
    self.a1 = -np.imag(p12) / self.k
    self.b1 = -np.imag(p13) / self.k
    self.a2 = (p22 - p33) / (self.k**2)
    self.b2 = 2 * np.real(p23) / (self.k**2)
rando = 1000*np.random.rand(n)
tau, corr = gc.get_corr(timeB_s[tindex1:tindex2], Br7_dtr[tindex1:tindex2],
                        Br9_dtr[tindex1:tindex2], normalized=False)
#tau, corr = gc.get_corr(np.arange(22250), Br7_dtr[tindex1:tindex2],
#                        Br7_dtr[tindex1:tindex2], normalized=False)
f, f0, compr7, pwrbr7, mag1, phase1, cos_phase1, interval = spec.spectrum_wwind(
    Br7_dtr[tindex1:tindex2], timeB_s[tindex1:tindex2], window='None')
f, f0, compr9, pwrbr9, mag1, phase1, cos_phase1, interval = spec.spectrum_wwind(
    Br9_dtr[tindex1:tindex2], timeB_s[tindex1:tindex2], window='None')
#f, f0, compran, pwrran, mag1, phase1, cos_phase1, interval = spec.spectrum_wwind(
#    rando, timeB_s[tindex1:tindex2], window='None')
interval = timeB_s[1] - timeB_s[0]
factor = 2.0/(n*interval)
cross_spec = np.conj(compr7)*compr9*factor
crossf, cross_csd = sps.csd(Br7_dtr[tindex1:tindex2], Br9_dtr[tindex1:tindex2],
                            fs=(1.0/interval), nperseg=nper)
#crossf2, cross_csd2 = sps.csd(Br7_dtr[tindex1:tindex2], Br9_dtr[tindex1:tindex2],
#                              fs=(1.0/interval), nperseg=11125)
crossf_auto, autobr7_csd = sps.csd(Br7_dtr[tindex1:tindex2],
                                   Br7_dtr[tindex1:tindex2],
                                   fs=(1.0/interval), nperseg=nper)  # scaling='density'
crossf_auto, autobr9_csd = sps.csd(Br9_dtr[tindex1:tindex2],
                                   Br9_dtr[tindex1:tindex2],
                                   fs=(1.0/interval), nperseg=nper)  # scaling='spectrum'
autobr7 = np.conj(compr7)*compr7*factor
autobr7_re = np.real(autobr7)
# Floor division so the slice bound stays an integer
cross_phase = np.angle(cross_spec[0:nper//2 + 1])
cross_phase_csd = np.angle(cross_csd)
cross_coh = ((np.abs(cross_spec[0:nper//2 + 1]))**2)/(factor*factor*pwrbr7*pwrbr9)
crossf, coh_csd = sps.coherence(Br7_dtr[tindex1:tindex2],
                                Br9_dtr[tindex1:tindex2],
                                fs=(1.0/interval), nperseg=nper)  # nperseg=11126
spec_corrtot = 0
for ff in np.arange(len(crossf)):
    spec_corrtot = spec_corrtot + (np.sqrt(coh_csd[ff])
                                   * np.cos(cross_phase_csd[ff])
                                   * np.sqrt(autobr7_csd[ff])
def FDDsvp(data, fs, df=0.01, pov=0.5, window='hann'):
    ndat = data.shape[0]  # number of sampled data points
    nch = data.shape[1]  # number of acquired channels
    freq_max = fs / 2  # Nyquist frequency
    nxseg = fs / df  # number of points per segment (to average over)
    # nseg = ndat // nxseg  # number of segments (to average over)
    noverlap = nxseg // (1 / pov)  # number of points overlapping between segments (default 50%)
    PSD_matr = np.zeros((nch, nch, int(nxseg / 2 + 1)),
                        dtype=complex)  # initialize the SD matrix
    S_val = np.zeros((nch, nch, int(nxseg / 2 + 1)))  # initialize the matrix for the singular values
    S_vec = np.zeros((nch, nch, int(nxseg / 2 + 1)),
                     dtype=complex)  # initialize the matrix for the singular vectors
    # loop computing the auto- and cross-spectral densities
    # (moving to the frequency domain)
    for _i in range(0, nch):
        for _j in range(0, nch):
            _f, _Pxy = signal.csd(data[:, _i], data[:, _j], fs=fs,
                                  nperseg=nxseg, noverlap=noverlap,
                                  window=window)
            PSD_matr[_i, _j, :] = _Pxy
    # loop computing the singular values
    for _i in range(np.shape(PSD_matr)[2]):
        U1, S1, _V1_t = np.linalg.svd(PSD_matr[:, :, _i])
        U1_1 = np.transpose(U1)
        S1 = np.diag(S1)
        S1rad = np.sqrt(S1)
        S_val[:, :, _i] = S1rad
        S_vec[:, :, _i] = U1_1
    # plot of the singular values (on a logarithmic scale)
    fig, ax = plt.subplots()
    for _i in range(nch):
        # ax.semilogy(_f, S_val[_i, _i])  # log scale
        ax.plot(_f[:], 10 * np.log10(S_val[_i, _i]))  # decibels
    ax.grid()
    ax.set_xlim(left=0, right=freq_max)
    ax.xaxis.set_major_locator(MultipleLocator(freq_max / 10))
    ax.xaxis.set_major_formatter(FormatStrFormatter('%g'))
    ax.xaxis.set_minor_locator(MultipleLocator(freq_max / 100))
    ax.set_title("Singular values plot - (Freq. res. ={0})".format(df))
    ax.set_xlabel('Frequency [Hz]')
    ax.set_ylabel(r'dB $[g^2/Hz]$')
    # ax.set_ylabel(r'dB $\left[\frac{\left(\frac{m}{s^2}\right)^2}{Hz}\right]$')
    mplcursors.cursor()
    Results = {}
    Results['Data'] = {'Data': data}
    Results['Data']['Samp. Freq.'] = fs
    Results['Data']['Freq. Resol.'] = df
    Results['Singular Values'] = S_val
    Results['Singular Vectors'] = S_vec
    Results['PSD Matrix'] = PSD_matr
    return fig, Results
def Coherency(data, fs_welch=20, nps=250, nover=230):
    '''From a matrix of time series, calculates a matrix of the coherency
    between each pair of series.

    For two signals (time series) x(t) and y(t) the coherency is defined by
    Pxy/(Pxx*Pyy)^{1/2}, where Pxy, Pxx, and Pyy are the power spectral
    densities. Welch's method with Hann windows is used for this. For
    time-dependent coherency use the parameter point_interval.

    Parameters
    ----------
    data : Matrix with the time series
        Rows -> signals
        Columns -> time
    fs_welch : Sampling (domain) frequency of the power spectral density.
        Default 20.
    points_interval : A divisor of the number of columns. If 0 the function
        only returns the stationary coherency. Default 0.

    Returns
    -------
    f : Real array (n,) of the frequency domain, where n is the number of
        windows generated by the Hann method and the values are between 0
        and fs/2.
    coherency : Complex array (nRows, nRows, n). The element
        coherency[i,j,k] is the coherency between signal i and signal j
        evaluated at the frequency f[k].
    Tf : Only if points_interval != 0. Real array (n,) of the frequency
        domain, where n is the number of windows generated by the Hann
        method and the values are between 0 and fs/2.
    Tcoherency : Only if points_interval != 0. Complex array
        (nRows, nRows, n, n_interval), where
        n_interval = nColumns/points_interval. The element
        Tcoherency[i,j,k,z] is the coherency between signal i and signal j
        evaluated at the frequency Tf[k] for interval z.
    '''
    N = data.shape[0]
    # Stationary coherency
    f = signal.csd(data[0, :], data[0, :], fs=fs_welch, noverlap=nover,
                   nperseg=nps)[0]
    Nf = f.shape[0]
    coherency = np.zeros((N, N, Nf), complex)
    for x in np.arange(N):
        for y in np.arange(N):
            if x >= y:
                f1, Pxy = signal.csd(data[x, :], data[y, :], fs=fs_welch,
                                     noverlap=nover, nperseg=nps)
                # Each auto power spectrum has an imaginary part equal to 0
                Pxx = np.abs(signal.csd(data[x, :], data[x, :], fs=fs_welch,
                                        noverlap=nover, nperseg=nps)[1])
                Pyy = np.abs(signal.csd(data[y, :], data[y, :], fs=fs_welch,
                                        noverlap=nover, nperseg=nps)[1])
                coherency[x, y, :] = Pxy / (Pxx * Pyy)**(1 / 2)
                coherency[y, x, :] = Pxy / (Pxx * Pyy)**(1 / 2)
    # I'm not sure whether, for the same fs and kind of window, the array f
    # is always the same
    if (f1 != f).any():
        return True
    return f, coherency
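# A minimal standalone sketch (hypothetical two-channel data) illustrating the
# definition in the docstring above: the squared magnitude of the coherency
# Pxy/sqrt(Pxx*Pyy) is the magnitude-squared coherence that
# scipy.signal.coherence returns directly, provided the same window, nperseg
# and noverlap are used for all three estimates.
import numpy as np
from scipy import signal

rng = np.random.default_rng(0)
common = rng.normal(size=4000)          # shared component in both channels
x = common + 0.5 * rng.normal(size=4000)
y = common + 0.5 * rng.normal(size=4000)

f, Pxy = signal.csd(x, y, fs=20, nperseg=250, noverlap=230)
_, Pxx = signal.csd(x, x, fs=20, nperseg=250, noverlap=230)
_, Pyy = signal.csd(y, y, fs=20, nperseg=250, noverlap=230)
coherency = Pxy / np.sqrt(Pxx.real * Pyy.real)

_, Cxy = signal.coherence(x, y, fs=20, nperseg=250, noverlap=230)
assert np.allclose(np.abs(coherency)**2, Cxy)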
                           Y[0, 0:errorLen])))
'''figure()
plot(data[trainLen+1:trainLen+errorLen+1])
#plot(data_mix[trainLen+1:trainLen+errorLen+1])
plot(Y[0,0:errorLen])
show()'''

###################### Wiener #######################
M = 1000  # length of the Wiener filter
s = inp1[trainLen + 1:trainLen + errorLen + 1]
s2 = inp2[trainLen + 1:5 * trainLen + errorLen + 1]
x = (sqrt(mix) * inp1[trainLen + 1:trainLen + errorLen + 1]
     + sqrt(1 - mix) * inp2[trainLen + 1:trainLen + errorLen + 1])
f, Pxx = sig.csd(x, x, nperseg=M)
f, Psx = sig.csd(s, x, nperseg=M)
f, Pss = sig.csd(s, s, nperseg=M)
f, Ps2s2 = sig.csd(s2, s2, nperseg=M)
H = Psx / Pxx
Om = np.linspace(0, np.pi, num=len(H))
H = H * np.exp(-1j * 2 * np.pi / len(H) * np.arange(len(H)) *
               (len(H) // 2))  # shift for causal filter
h = np.fft.irfft(H)
y = np.convolve(x, h, mode='same')
x1in = s
xsigin = x
def singleTrial(trial, subTrial):
    # ****************************************
    # ******* Simulation settings and variables
    # ****************************************
    # Simulation
    duration = 11000
    tmin = 1000
    dt = 0.05
    simTypes = ['dc', 'rc']
    fs = 1 / (dt * 1e-3)

    # Files and paths (note: subTrial, matching the parameter name)
    dataPath = ('/home/pablo/osf/Master-Thesis-Data/population/psd/cancel/trial'
                + str(trial) + '/trial' + str(subTrial))
    gsyn = {'dc': [], 'rc': []}

    # ****************************************
    # ******* Running simulation for each case
    # ****************************************
    for j, simType in enumerate(simTypes):
        # Variables calculated
        gsynMG = []
        gsynSOL = []
        taux = []
        # Inputs plots
        labels = {'rc': 'Inhibitory inputs', 'dc': 'Excitatory inputs'}
        symbols = {'rc': 'k', 'dc': 'k--'}

        # ****************************************
        # ******* Getting and processing for analysis
        # ****************************************
        fileName = dataPath + '/gsyn' + simType + '.dat'
        f = open(fileName, 'r')
        lines = f.readlines()
        for line in lines:
            # RC effect
            taux.append(float(line.split()[0]))
            gsynMG.append(float(line.split()[1]))
            gsynSOL.append(float(line.split()[2]))
        f.close()

        # Membrane potentials from pools and EMG
        #plt.figure()
        #plt.plot(gsynMG, label='MG')
        #plt.plot(gsynSOL, label='SOL')
        #plt.legend()
        #plt.title(simType)
        #plt.show()

        # ****************************************
        # ******* Gathering data for later use
        # ****************************************
        staticInput = [y for x, y in enumerate(gsynSOL) if taux[x] > tmin]
        t = [y for x, y in enumerate(taux) if taux[x] > tmin]
        gsyn[simType] = staticInput

    #for simType in simTypes:
    #    plt.figure()
    #    plt.plot(t, gsyn[simType], symbols[simType], label=labels[simType])
    #    plt.legend()
    #    plt.xlabel('Time (ms)')
    #    plt.ylabel('Voltage (mV)')
    #    plt.grid()
    #print('Mean conductance, in module, is {:.2f}'.format(abs(np.mean(
    #    gsyn['dc']))))

    # ****************************************
    # ******* Computing PSD and coherences
    # ****************************************
    fr = 1
    nperseg = 4 * fs / 2 / fr
    noverlap = None
    nfft = None  # 8*nperseg
    #detrend = 'constant'
    detrend = False
    #detrend = 'linear'
    scale = 'spectrum'
    limits = {'rc': 0.0002, 'dc': 1}
    for simType in simTypes:
        # Plot inputs PSD
        ff, PSD = signal.welch(gsyn[simType], fs, 'hann', nperseg, noverlap,
                               nfft, detrend, scaling=scale)
        #plt.figure()
        #plt.plot(ff, PSD, symbols[simType], label=labels[simType])
        #plt.ylim([0, limits[simType]])
        #plt.xlim([0, 50])
        #plt.legend()
        #plt.xlabel('Frequency (Hz)')
        #plt.ylabel('Power spectral density (mN$^2$)')
        #plt.grid()

    # Plot coherence between inputs
    # nperseg is higher here than in the example because more resolution was
    # needed to compare f to 10 Hz
    nperseg = 20000
    fc, coherence = signal.coherence(gsyn['rc'], gsyn['dc'], fs, 'hann',
                                     nperseg, noverlap, nfft, detrend)
    #plt.figure()
    #print('nsamples: {:}'.format(len(gsyn['rc'])))
    #print('nperseg: {:}'.format(nperseg))
    #print('nwindows: {:.1f}'.format(len(gsyn['rc'])/nperseg))
    #import pdb; pdb.set_trace()
    #plt.plot(fc, coherence, symbols[simType], label=labels[simType])
    #plt.xlabel('Frequency (Hz)')
    #plt.ylabel('Corticomuscular coherence')
    #plt.grid()
    #plt.xlim([0, 50])
    #plt.legend()

    # Cross spectral density used to study the coherence phase
    _, crossSpectrum = signal.csd(gsyn['rc'], gsyn['dc'], fs, 'hann',
                                  nperseg, noverlap, nfft, detrend)
    crossSpectrum = np.angle(crossSpectrum, deg=False)

    # Cross spectral density plot
    #plt.figure()
    #plt.plot(fc, crossSpectrum, 'k')
    #plt.xlabel('Frequency (Hz)')
    #plt.ylabel('Pxy phase (degrees)')
    #plt.grid()
    #plt.xlim([0, 50])
    #plt.show()

    return fc, crossSpectrum
specgram1 = get_specgram(chname1, remake=True, fftlength=2**6, **kwargs)
specgram2 = get_specgram(chname2, remake=True, fftlength=2**6, **kwargs)
csd_specgram = get_csd_specgram(chname1, chname2, remake=True,
                                fftlength=2**6, **kwargs)

# calc scipy
fs = 1. / timeseries1.dt
fs = fs.value
x = timeseries1.value
y = timeseries2.value
nperseg = int(fftlength * fs)
f, csd_scipy = signal.csd(x, y, fs, nperseg=nperseg, noverlap=0)
mag_scipy = np.abs(csd_scipy)
angle_scipy = np.angle(csd_scipy)

# calc gwpy
csd_specgram = timeseries1.csd_spectrogram(timeseries2, stride=fftlength,
                                           fftlength=fftlength, overlap=0,
                                           window='hanning', nproc=2)
mag_gwpy = csd_specgram.mean(axis=0).abs()
angle_gwpy = csd_specgram.mean(axis=0).angle()

plot, (ax0, ax1) = plt.subplots(nrows=2, sharex=True, figsize=(8, 6))
ax0.plot(mag_gwpy, label='gwpy')
def dens_and_power(data, err=None, bkg=0., degree=3, nbins=50,
                   xirange=[-20, 0], scaling='spectrum', col='k',
                   plot=False, verbose=False):
    #data[:,0] = data[:,0]*180./np.pi
    #plt.subplot(1,2,1)
    t0 = time.time()
    #counts, bins, patches = plt.hist(data[:,0], bins=np.linspace(xirange[0], xirange[1], nbins), histtype='step', lw=2, color='k')
    counts, bins = np.histogram(data[:, 0],
                                bins=np.linspace(xirange[0], xirange[1],
                                                 nbins))
    counts = counts + np.random.poisson(bkg, size=len(counts))
    centroids = (bins[1:] + bins[:-1]) / 2.
    if err is None:  # "is None" rather than "== None": err may be an array
        err = np.sqrt(counts)
    #plt.errorbar(centroids, counts, yerr=err, capthick=0, c='k')
    #x = numpy.linspace(min(bins), max(bins), 100)
    pp = Polynomial.fit(centroids, counts - bkg, deg=degree, w=1. / err)
    #plt.plot(x, pp(x), '-', c=col)
    tdata = (counts - bkg) / pp(centroids)
    terr = err / pp(centroids)
    t1 = time.time()
    if verbose:
        print("time for tdata and terr:", t1 - t0)
    t0 = time.time()
    from scipy import signal
    px, py = signal.csd(tdata, tdata, fs=1. / (centroids[1] - centroids[0]),
                        scaling=scaling, nperseg=len(centroids))
    py = py.real
    px = 1. / px
    py = numpy.sqrt(py * (centroids[-1] - centroids[0]))
    t1 = time.time()
    if verbose:
        print("time for power spectrum:", t1 - t0)
    # Perform simulations of the noise to determine the power in the noise
    t0 = time.time()
    nerrsim = 10000
    ppy_err = numpy.empty((nerrsim, len(px)))
    for ii in range(nerrsim):
        tmock = terr * numpy.random.normal(size=len(centroids))
        ppy_err[ii] = signal.csd(tmock, tmock,
                                 fs=1. / (centroids[1] - centroids[0]),
                                 scaling=scaling,
                                 nperseg=len(centroids))[1].real
    py_err = numpy.sqrt(numpy.median(ppy_err, axis=0)
                        * (centroids[-1] - centroids[0]))
    pcut = 0.0  # Only trust points above this, then remove noise - Jo has 0.4
    t1 = time.time()
    if verbose:
        print("time for error analysis", t1 - t0)
    if plot:
        plt.subplot(111)
        plt.loglog(px[py > pcut],
                   numpy.sqrt(py[py > pcut]**2. - py_err[py > pcut]**2.),
                   marker='o', zorder=0, ls='none', markersize=5, color=col)
        plt.errorbar(px[(py < pcut)],
                     numpy.amax(numpy.array([py, py_err]), axis=0)[py < pcut],
                     yerr=numpy.array([.1 + 0. * px[(py < pcut)],
                                       .1 + 0. * px[(py < pcut)]]),
                     uplims=True, capthick=2., ls='none', color='k', zorder=0)
        plt.loglog(px, py_err, lw=2., color=col, alpha=0.5, zorder=-2)
    return tdata, terr, px, py, py_err
x2 = np.array(x)
y2 = np.array(y)
fx, xPower = signal.welch(x, fs, nperseg=windowSize, noverlap=0,
                          scaling="spectrum")
fy, yPower = signal.welch(y, fs, nperseg=windowSize, noverlap=0,
                          scaling="spectrum")
fxy, xyPower = signal.csd(x, y, fs, nperseg=windowSize, noverlap=0,
                          scaling="spectrum")
welchCoh1 = np.power(np.absolute(xyPower), 2)
welchCoh2 = xPower * yPower
welchCoh = welchCoh1 / welchCoh2
f, Cxy = signal.coherence(x, y, fs, nperseg=windowSize, noverlap=N / 200)
f2, Cxy2 = signal.coherence(x, y, fs, nperseg=windowSize, noverlap=0)
# my method
powXX = np.zeros(shape=(fftSize), dtype="complex")
powYY = np.zeros(shape=(fftSize), dtype="complex")
powXY = np.zeros(shape=(fftSize), dtype="complex")
# win
dataframe2 = pd.read_csv(
    "/content/gdrive/My Drive/Colab Notebooks/CSIRO/iladata_m10dBm.csv")
data2_adc_01 = dataframe2[[
    'Sample in Buffer', 'Sample in Window', 'TRIGGER',
    'design_1_i/usp_rf_data_converter_0_m20_axis_tdata[63:0]',
    'design_1_i/usp_rf_data_converter_0_m20_axis_tvalid'
]]
# drop the first row of dataframe2 itself (not the unrelated dataframe)
data2_adc_01 = data2_adc_01.drop(dataframe2.index[[0]])
data2_adc_01 = data2_adc_01.reset_index(drop=True)
data2_d1 = data_signed_int(
    data2_adc_01, 'design_1_i/usp_rf_data_converter_0_m20_axis_tdata[63:0]')
data2_d1 = data2_d1['var_names']
slice_data2_df1 = np.array(data2_d1, dtype=float)

f, Pxy = signal.csd(slice1, slice_data2_df1, fs, nperseg=1024, noverlap=512)
plt.semilogy(f, np.abs(Pxy))
plt.xlabel('frequency [Hz]')
plt.ylabel('CSD [V**2/Hz]')
plt.show()

"""A CSD with a spike at some frequency F Hertz indicates that the pair of
signals is periodically correlated every 1/F seconds. The correlation may be
due to a resonant frequency at F.

##### Serial Correlation
"""

def autocorr(x):
    result = np.correlate(x, x, mode='full')
    # keep the non-negative lags; floor division so the index is an integer
    return result[result.size // 2:]
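# A minimal standalone sketch (synthetic signals; fs and the 50 Hz tone are
# assumptions) illustrating the note above: two noisy signals sharing a common
# 50 Hz component produce a clear spike in |Pxy| at F = 50 Hz, i.e. they are
# correlated every 1/F = 20 ms.
import numpy as np
from scipy import signal

fs = 1000.0
t = np.arange(0, 5.0, 1/fs)
rng = np.random.default_rng(1)
common = np.sin(2*np.pi*50*t)                 # shared periodic component
a = common + rng.normal(size=t.size)
b = 0.5*common + rng.normal(size=t.size)

f, Pxy = signal.csd(a, b, fs=fs, nperseg=1024)
print('CSD peaks at %.1f Hz' % f[np.argmax(np.abs(Pxy))])   # ~50 Hz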
def test_detrend_external_nd_m1(self):
    x = np.arange(40, dtype=np.float64) + 0.04
    x = x.reshape((2, 2, 10))
    f, p = csd(x, x, nperseg=10,
               detrend=lambda seg: signal.detrend(seg, type='l'))
    assert_allclose(p, np.zeros_like(p), atol=1e-15)
def test_detrend_external(self):
    x = np.arange(10, dtype=np.float64) + 0.04
    f, p = csd(x, x, nperseg=10,
               detrend=lambda seg: signal.detrend(seg, type='l'))
    assert_allclose(p, np.zeros_like(p), atol=1e-15)
def test_no_detrending(self):
    x = np.arange(10, dtype=np.float64) + 0.04
    f1, p1 = csd(x, x, nperseg=10, detrend=False)
    f2, p2 = csd(x, x, nperseg=10, detrend=lambda x: x)
    assert_allclose(f1, f2, atol=1e-15)
    assert_allclose(p1, p2, atol=1e-15)
def fdbf(shotGather, weightType='none', steeringVector='plane', numv=2048,
         min_vel=1, max_vel=1000, min_frequency=5, max_frequency=100):
    # Ensure that min_velocity is greater than zero for numerical stability
    if min_vel < 1:
        min_vel = 1

    # Spatiospectral correlation matrix.......................................
    # Floor division: the shape must be an integer.
    R = np.zeros((shotGather.n_samples//2 + 1, shotGather.n_channels,
                  shotGather.n_channels), complex)
    for m in range(shotGather.n_channels):
        for n in range(shotGather.n_channels):
            freq, R[:, m, n] = signal.csd(shotGather.timeHistories[:, m],
                                          shotGather.timeHistories[:, n],
                                          fs=1.0 / shotGather.dt,
                                          window='boxcar',
                                          nperseg=shotGather.n_samples)

    # Remove frequencies above/below specified max/min frequencies and
    # downsample (if required by zero padding)
    fminID = np.argmin(np.absolute(freq - min_frequency))
    fmaxID = np.argmin(np.absolute(freq - max_frequency))
    freq_id = range(fminID, (fmaxID + 1), shotGather.multiple)
    R = R[freq_id, :, :]
    freq = freq[freq_id]

    # Weighting matrices......................................................
    W = np.zeros(np.shape(R))
    # Square root of distance from source
    if str.lower(weightType) == 'sqrt':
        W[:, :, :] = np.diag(np.sqrt(abs(shotGather.offset)
                                     + shotGather.position))
    # 1/|A(f,x)|, where A is the Fourier Transform of a(t,x)
    elif str.lower(weightType) == 'invamp':
        freqFFT = np.concatenate([
            np.arange(0, shotGather.fnyq + shotGather.df, shotGather.df),
            np.arange(-(shotGather.fnyq - shotGather.df), 0, shotGather.df)
        ])
        Af = np.fft.fft(shotGather.timeHistories, axis=0)
        for bb in range(len(freq)):
            freq_id = np.argmin(np.absolute(freqFFT - freq[bb]))
            weight = 1.0 / np.absolute(Af[freq_id, :])
            W[bb, :, :] = np.diag(weight)
    # No weighting
    else:
        W[:, :, :] = np.eye(shotGather.n_channels)

    # Beamforming.............................................................
    v_vals = np.linspace(min_vel, max_vel, numv)
    # Pre-allocate variables for efficiency
    power = np.zeros((numv, len(freq)), complex)
    pnorm = np.zeros(np.shape(power), complex)
    v_peak = np.zeros(np.shape(freq))

    # Loop through all frequency values, compute power at all trial wavenumbers
    for m in range(len(freq)):
        # Convert trial velocities to wavenumbers (set equal to 0 for k > kres)
        k_vals = 2 * np.pi * freq[m] / v_vals
        alias_id = np.where(k_vals > shotGather.kres)[0]
        # Weighting matrix for current frequency
        Wf = W[m, :, :]
        for k in range(numv):
            # Steering vector
            if str.lower(steeringVector) == 'cylindrical':
                pos = shotGather.position
                # If x[0]=0, set equal to an arbitrarily small number for
                # stability
                if pos[0] == 0:
                    pos[0] = 1e-16
                H0 = (special.j0(k_vals[k] * pos)
                      + 1j * special.y0(k_vals[k] * pos))
                expterm = np.exp(1j * np.angle(H0))
            else:
                expterm = np.exp(1j * k_vals[k] * shotGather.position)
            # power[k,m] = expterm' * Wf * R[m,:,:] * Wf' * expterm
            power[k, m] = np.dot(
                np.dot(np.dot(np.dot(np.conj(expterm).transpose(), Wf),
                              R[m, :, :]),
                       Wf.transpose()),
                expterm)
        power[alias_id, m] = 0
        # Index of wavenumber corresponding to maximum power at freq[m]
        max_id = np.argmax(np.abs(power[:, m]))
        # Normalize all power values at freq[m] by the maximum power at freq[m]
        pnorm[:, m] = np.abs(power[:, m]) / np.max(np.abs(power[:, m]))
        pnorm[alias_id, m] = float('nan')
        # Wavenumber corresponding to max power at freq[m]
        v_peak[m] = v_vals[max_id]

    # Create instance of DispersionPower class
    dispersionPower = dctypes.DispersionPower(freq, v_peak, v_vals,
                                              'velocity', shotGather.kres,
                                              pnorm)
    return dispersionPower
print('\nSecond task')

# (a)
print('\n(a)')
# Load the file
data = loadmat('../data/tc2ex2.mat')
print("Elements of the file tc2ex2.mat: ")
print([key for key in data.keys()])
Fs = data['Fs'][0][0]
x = np.array(data['input'])
y = np.array(data['output'])
fxy, Sxy = csd(x.transpose(), y.transpose(), fs=Fs, scaling='spectrum',
               nperseg=100000)
fxx, Sxx = csd(x.transpose(), x.transpose(), fs=Fs, scaling='spectrum',
               nperseg=100000)
H = Sxy[0]/Sxx[0]
if all(fxy == fxx):
    f = fxx
else:
    raise ValueError('The sampled frequencies of Sxy and Sxx do not '
                     'correspond; please check.')
fig1, axs = plt.subplots(2, 1)
#plt.xlabel('Sampled frequencies [Hz]')
axs[0].semilogx(f, 20*np.log10(np.abs(H)))
axs[0].set_ylabel(r'$|\hat{H}(jw)|$ [dB]')  # Estimated frequency response
axs[0].set_xlim(xmax=100)
axs[0].set_ylim(ymin=-75)
axs[0].grid(which='both')
def mainFDD(inputMatrix, sampleFreq, title, figTitle, FDDsolverTitle,
            peakThresh=-10000, frequencyThresh=30, writeToFile=0):
    # Frequency Domain Decomposition
    # Input:
    # inputMatrix - [m,n] matrix where each column n contains m sample
    #               measurements from one channel
    # sampleFreq - Number of samples per second in Hz
    # peakThresh - Only the peaks with values above this threshold will be
    #              enumerated.
    #
    # Output:
    # Frequencies - Array of frequencies corresponding to the FDD plot
    # dbs1 - Array of the absolute values of the identified singular values,
    #        given in decibels.
    # chosenPeaksFreq - Array of the frequencies corresponding to the peaks
    #                   chosen by the user
    # chosenPeaksMag - Array of the magnitudes of the peaks chosen by the user
    # chosenPeaksMS - Matrix [n,k] containing k mode shapes. Each row n
    #                 corresponds to the channel position n in inputMatrix.

    # Allocating space
    rows = np.size(inputMatrix, 1)
    cols = np.size(inputMatrix, 1)
    trial = csd(inputMatrix[:, 0], inputMatrix[:, 1], sampleFreq)
    depth = len(trial[0])
    PSD_matrix = np.empty((rows, cols, depth), dtype='complex64')
    freq_matrix = np.empty((rows, cols, depth), dtype='float')
    Frequencies = np.empty(np.size(freq_matrix, 2), dtype='float')
    Frequencies[:] = trial[0]

    # Compute the PSD matrix, PSD_matrix[i,j,k], where [i,j,:] contains the
    # cross-spectral density of input channels i and j. Each k corresponds to
    # a frequency step derived from the given sample rate, so there is a 2D
    # PSD matrix for each frequency step k.
    for i in range(np.size(inputMatrix, 1)):
        for j in range(np.size(inputMatrix, 1)):
            f, Pxy = csd(inputMatrix[:, i], inputMatrix[:, j], sampleFreq)
            PSD_matrix[i, j, :] = Pxy
            freq_matrix[i, j, :] = f

    # Allocating space
    testMat = PSD_matrix[:, :, 1]
    testSVD = svd(testMat)
    u_svd = testSVD[0]
    s1 = np.empty(np.size(PSD_matrix, 2), dtype='float')
    s2 = np.empty(np.size(PSD_matrix, 2), dtype='float')
    ms = np.empty((np.size(u_svd, 0), np.size(PSD_matrix, 2)),
                  dtype='complex64')
    ms2 = np.empty((np.size(u_svd, 0), np.size(PSD_matrix, 2)),
                   dtype='complex64')

    # Performing singular value decomposition on the PSD matrix.
    # By default, based on the assumption that the vibration at frequency k is
    # dominated by a single mode, only the first and most prominent singular
    # value, s1, is collected. The mode shape corresponding to s1 is collected
    # in ms.
    for i in range(np.size(PSD_matrix, 2)):
        u, s, vh = svd(PSD_matrix[:, :, i])
        s1[i] = s[0]
        s2[i] = s[1]
        ms[:, i] = u[:, 0]
        # If the second singular values are to be examined:
        #s2[i] = s[1]
        #ms2[:, i] = u[:, 1]

    # Creating array of magnitudes in decibels.
    dbs1 = np.empty(len(s1), dtype='float')
    for i in range(len(s1)):
        dbs1[i] = 5*np.log10(np.abs(s1[i]))

    # Simple peak identification. If a value is larger than both its
    # neighbours, it is determined to be a peak.
    maxList = []
    maxList_pos = []
    for i in range(1, len(s1) - 1):
        if (s1[i-1] < s1[i] and s1[i+1] < s1[i] and s1[i] >= peakThresh):
            maxList.append(s1[i])
            maxList_pos.append(i)
    peakFreq = Frequencies[maxList_pos]
    peakMag = s1[maxList_pos]

    # Mark each peak with both a circle and a number, where the number can be
    # used later for peak identification.
    xs = peakFreq
    ys = peakMag
    index = []
    for i in range(1, len(ys) + 1):
        index.append(i)
    maxListDB = []
    maxList_posDB = []  # list of the indices i where the max frequencies are identified
    for i in range(1, len(dbs1) - 1):
        if (dbs1[i-1] < dbs1[i] and dbs1[i+1] < dbs1[i]
                and dbs1[i] >= peakThresh
                and Frequencies[i] < frequencyThresh):
            maxListDB.append(dbs1[i])
            maxList_posDB.append(i)
    peakFreqDB_globalIndex = np.empty((len(maxList_posDB), 2), dtype=float)
    peakMagDB = np.empty(len(maxList_posDB), dtype=float)
    peakMS = np.empty((len(ms), len(maxList_posDB)), dtype=complex)
    for i in range(len(maxList_posDB)):
        peakFreqDB_globalIndex[i, 0] = maxList_posDB[i]
        peakFreqDB_globalIndex[i, 1] = Frequencies[maxList_posDB[i]]
        # peakFreqDB_globalIndex now contains [globalIndex, peakFrequency]
        peakMagDB[i] = dbs1[maxList_posDB[i]]
        peakMS[:, i] = ms[:, maxList_posDB[i]]
    peakFrequencies = peakFreqDB_globalIndex[:, 1]
    indexDB = []
    for i in range(1, len(peakMagDB) + 1):
        indexDB.append(i)
    peakFreqDataframe = pd.DataFrame(peakFrequencies,
                                     columns=['Frequencies[Hz]'])
    peakFreqDataframe.index += 1
    peakFreqDataframe.index.name = "#"
    peakFreqDataframe.columns.name = 'Peak no.'
    print(peakFreqDataframe)

    # Plot of the 1st singular values
    suggestedValuesPlotTitle = (FDDsolverTitle
                                + "_Suggested 1st singular values, peak threshold = "
                                + str(peakThresh))
    plt.figure(suggestedValuesPlotTitle)
    plt.plot(Frequencies, dbs1)
    plt.title(suggestedValuesPlotTitle, size=16)
    for i in range(len(peakFrequencies)):
        plt.text(peakFrequencies[i], peakMagDB[i], '%s' % indexDB[i], size=22)
    plt.plot(Frequencies[maxList_posDB], dbs1[maxList_posDB], '+')
    plt.xlabel('Frequency [Hz]')
    plt.ylabel('1st Singular Values [dB]')
    plt.grid()
    plt.show()

    peakMagDB_hstack = np.hstack(peakMagDB)
    peakFreqDB_hstack = np.hstack(peakFreqDB_globalIndex[:, 1])
    numofpeaks = input('Enter desired number of peaks : ')
    numofpeaks = int(numofpeaks)
    selectedPeakIndex = np.empty(0, dtype=int)
    count = 0
    # As long as the count is below numofpeaks, keep iterating through the
    # peaks.
    while (count < numofpeaks):
        print(str(numofpeaks - count) + ' peak(s) remaining')
        peak = input('Enter the ID number of the peak you would like to include: ')
        foundSame = 0
        for i in range(len(selectedPeakIndex)):
            if (int(peak) == selectedPeakIndex[i]):
                print("You have already selected peak :" + str(peak))
                print("Please select new peak")
                foundSame = 1
                break
        if (foundSame == 0):
            selectedPeakIndex = np.append(selectedPeakIndex, int(peak))
            count += 1
    selectedPeakIndex = np.sort(selectedPeakIndex)
    print('You have chosen the following peaks:')
    print(selectedPeakIndex)
    print("The selected peaks have been given new enumeration based on their ascending order")
    chosenPeaksFreq = np.empty(len(selectedPeakIndex), dtype=float)
    chosenPeaksMag = np.empty(len(selectedPeakIndex), dtype=float)
    chosenPeaksMS = np.empty((len(peakMS), len(selectedPeakIndex)),
                             dtype=complex)
    for i in range(len(selectedPeakIndex)):
        chosenPeaksFreq[i] = peakFreqDB_hstack[selectedPeakIndex[i] - 1]
        chosenPeaksMag[i] = peakMagDB_hstack[selectedPeakIndex[i] - 1]
        chosenPeaksMS[:, i] = peakMS[:, selectedPeakIndex[i] - 1]

    # Compute mode shapes
    outCols = np.size(chosenPeaksMS, 1)
    outRows = np.size(chosenPeaksMS, 0) + 2
    outputWriteMatrix = np.empty((outRows, outCols), dtype=object)
    textArray = np.empty(outCols, dtype=object)
    for i in range(outCols):
        textArray[i] = str('Mode #' + str(i + 1))
    outputWriteMatrix[2:, :] = chosenPeaksMS
    outputWriteMatrix[1, :] = chosenPeaksFreq
    outputWriteMatrix[0, :] = textArray
    if (writeToFile == 1):
        git_FDDtools.createCsvFromMatrix(outputWriteMatrix,
                                         figTitle + '_Result.txt')

    plt.figure(figTitle)
    #plt.title('FDD Method', size=38)
    #toolsAndMisc2.plotIntoLatex()
    plt.plot(Frequencies, dbs1)
    for i in range(len(chosenPeaksFreq)):
        plt.text(chosenPeaksFreq[i], chosenPeaksMag[i],
                 '%s' % "#" + str(i + 1), fontsize=18)
    plt.plot(Frequencies[maxList_posDB], dbs1[maxList_posDB], '+')
    plt.xlabel('Frequency [Hz]')
    plt.ylabel('1st Singular values [dB]')
    plt.grid()
    plt.show()
    return Frequencies, dbs1, chosenPeaksFreq, chosenPeaksMag, chosenPeaksMS
import numpy as np
import matplotlib.pyplot as plt
import scipy.signal as sig

N = 8129  # number of samples
M = 256  # length of Wiener filter
Om0 = 0.1 * np.pi  # frequency of original signal
N0 = 0.1  # PSD of additive white noise

# generate original signal
s = np.cos(Om0 * np.arange(N))
# generate observed signal
g = 1 / 20 * np.asarray([1, 2, 3, 4, 5, 4, 3, 2, 1])
n = np.random.normal(size=N, scale=np.sqrt(N0))
x = np.convolve(s, g, mode='same') + n

# estimate (cross) PSDs using the Welch technique
f, Pxx = sig.csd(x, x, nperseg=M)
f, Psx = sig.csd(s, x, nperseg=M)

# compute Wiener filter
H = Psx / Pxx
H = H * np.exp(-1j * 2 * np.pi / len(H) * np.arange(len(H)) *
               (len(H) // 2))  # shift for causal filter
h = np.fft.irfft(H)

# apply Wiener filter to observation
y = np.convolve(x, h, mode='same')

# plot (cross) PSDs
Om = np.linspace(0, np.pi, num=len(H))
plt.figure(figsize=(10, 4))
plt.subplot(121)
plt.plot(Om, 20 * np.log10(np.abs(.5 * Pxx)),
import numpy as np
from scipy import signal
import matplotlib.pyplot as plt

# You want to phase align your inputs, so you inject a Gaussian
# noise source into both

# Our noise source
g = np.random.normal(0, 1, 10000)

# Our receivers, one is delayed by one sample!
X = g
Y = g[1:]

# Compute the cross power spectrum (cross spectral density)
f, Pxy = signal.csd(X, Y, fs=1, nperseg=1024)

# Plot CSD
fig, (ax1, ax2) = plt.subplots(2, 1)
fig.suptitle('Scipy CSD(X,Y) where X[n+1]==Y[n]')
ax1.set_title('Mag.')
ax1.set_ylabel('dB')
ax1.plot(np.log10(np.abs(Pxy)))
ax2.set_title('Phase')
ax2.set_ylabel('rad')
ax2.plot(np.angle(Pxy))
plt.show(block=False)
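# Follow-up note to the sketch above (an added illustration, not part of the
# original snippet): for a pure delay of d samples the CSD phase is linear in
# frequency, with |dphi/df| = 2*pi*d for f in cycles/sample (fs=1), so the
# misalignment can be recovered from a linear fit to the unwrapped phase. Only
# the slope's magnitude is used here, since the sign depends on the
# conjugation convention of the CSD estimator.
slope = np.polyfit(f, np.unwrap(np.angle(Pxy)), 1)[0]
print('estimated delay: %.2f samples' % abs(slope / (2 * np.pi)))  # ~1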
nue_nux_cspec_imag[r] = np.zeros((howmanyfreqs[r], howmanytimes[r]))
# Floor division so the result can be used as a slice index
beginhere[r] = Nperseg[r] // 2 + 1
if mod(Nperseg[r], 2) == 0:
    times_cspec[r] = t[r][beginhere[r]:-beginhere[r] + 1]
else:
    times_cspec[r] = t[r][beginhere[r]:-beginhere[r]]

print('Computing cross spectrograms')
for r in rotrates:
    bh = beginhere[r]
    getfreqs = 'yes'
    for time in range(howmanytimes[r]):
        bup = csd(gwc[r][time:time + Nperseg[r]],
                  anuec[r][time:time + Nperseg[r]],
                  fs=1. / dt[r], window=(Window), nperseg=Nperseg[r],
                  noverlap=0, nfft=Nfft[r], scaling=Scaling)
        gw_anue_cspec_real[r][:, time] = real(bup[1])
        gw_anue_cspec_imag[r][:, time] = imag(bup[1])
        bup = csd(gwc[r][time:time + Nperseg[r]],
                  nuxc[r][time:time + Nperseg[r]],
                  fs=1. / dt[r], window=(Window), nperseg=Nperseg[r],
                  noverlap=0, nfft=Nfft[r], scaling=Scaling)
        gw_nux_cspec_real[r][:, time] = real(bup[1])
        gw_nux_cspec_imag[r][:, time] = imag(bup[1])
def pal5_abc(sdf_pepper, sdf_smooth, options):
    """
    """
    # Setup apar grid
    apar = numpy.arange(options.amin, options.amax, options.dapar)
    dens_unp = numpy.array([sdf_smooth._density_par(a) for a in apar])
    if options.recompute:
        # Load density and omega from file
        outdens = options.outdens
        outomega = options.outomega
        if options.batch is not None:
            outdens = outdens.replace('.dat', '.%i.dat' % options.batch)
        if options.batch is not None:
            outomega = outomega.replace('.dat', '.%i.dat' % options.batch)
        densdata = numpy.genfromtxt(outdens, delimiter=',', skip_header=1)
        omegadata = numpy.genfromtxt(outomega, delimiter=',', skip_header=1)
        nd = 0
    else:
        # Setup saving of the densities and mean Omegas
        denswriter, omegawriter, csvdens, csvomega = \
            setup_densOmegaWriter(apar, options)
        # Setup sampling
        massrange = simulate_streampepper.parse_mass(options.mass)
        rs = simulate_streampepper.rs
        sample_GM = lambda: (10.**((-0.5)*massrange[0])
                             + (10.**((-0.5)*massrange[1])
                                - 10.**((-0.5)*massrange[0]))
                             * numpy.random.uniform())**(1./(-0.5)) \
            / bovy_conversion.mass_in_msol(V0, R0)
        sample_rs = lambda x: rs(x * bovy_conversion.mass_in_1010msol(V0, R0)
                                 * 10.**10., plummer=options.plummer)
        rate_range = numpy.arange(massrange[0] + 0.5, massrange[1] + 0.5, 1)
        cdmrate = numpy.sum([simulate_streampepper.dNencdm(
            sdf_pepper, 10.**r, Xrs=options.Xrs, plummer=options.plummer,
            rsfac=options.rsfac) for r in rate_range])
        print("Using an overall CDM rate of %f" % cdmrate)
    # Load Pal 5 data to compare to
    if options.mockfilename is None:
        power_data, data_err, data_ppyr, data_ppyi = \
            process_pal5_densdata(options)
    else:
        power_data, data_err, data_ppyr, data_ppyi = \
            process_mock_densdata(options)
    # Run ABC
    while True:
        if not options.recompute:
            # Simulate a rate
            l10rate = (numpy.random.uniform()
                       * (options.ratemax - options.ratemin)
                       + options.ratemin)
            rate = 10.**l10rate * cdmrate
            print(l10rate, rate)
            # Simulate
            sdf_pepper.simulate(rate=rate, sample_GM=sample_GM,
                                sample_rs=sample_rs, Xrs=options.Xrs)
            # Compute density and meanOmega and save
            try:
                densOmega = numpy.array([
                    sdf_pepper._densityAndOmega_par_approx(a)
                    for a in apar]).T
            except IndexError:  # no hit
                dens = numpy.array([sdf_smooth._density_par(a)
                                    for a in apar])
                omega = numpy.array([sdf_smooth.meanOmega(a, oned=True)
                                     for a in apar])
            else:
                dens = densOmega[0]
                omega = densOmega[1]
            write_dens = [l10rate]
            write_omega = [l10rate]
            write_dens.extend(list(dens))
            write_omega.extend(list(omega))
            denswriter.writerow(write_dens)
            omegawriter.writerow(write_omega)
            csvdens.flush()
            csvomega.flush()
        else:
            if nd >= len(densdata):
                break
            l10rate = densdata[nd, 0]
            dens = densdata[nd, 1:]
            omega = omegadata[nd, 1:]
            nd += 1
        # Convert density to observed density
        xixi, dens = convert_dens_to_obs(sdf_pepper, apar, dens, omega,
                                         dens_unp, minxi=options.minxi,
                                         maxxi=options.maxxi)
        # Add errors (Rao-Blackwellize...)
        for ee in range(options.nerrsim):
            tdens = dens + numpy.random.normal(size=len(xixi)) * data_err
            # Compute power spectrum
            tcsd = signal.csd(tdens, tdens, fs=1. / (xixi[1] - xixi[0]),
                              scaling='spectrum', nperseg=len(xixi))[1].real
            power = numpy.sqrt(tcsd * (xixi[-1] - xixi[0]))
            # Compute bispectrum
            Bspec, Bpx = bispectrum.bispectrum(
                numpy.vstack((tdens, tdens)).T, nfft=len(tdens), wind=7,
                nsamp=1, overlap=0)
            ppyr = numpy.fabs(Bspec[len(Bspec) // 2 + _BISPECIND,
                                    len(Bspec) // 2:].real)
            ppyi = numpy.fabs(Bspec[len(Bspec) // 2 + _BISPECIND,
                                    len(Bspec) // 2:].imag)
            yield (l10rate,
                   numpy.fabs(power[1] - power_data[1]),
                   numpy.fabs(power[2] - power_data[2]),
                   numpy.fabs(power[3] - power_data[3]),
                   numpy.fabs(numpy.log(numpy.mean(tdens[7:17])
                                        / numpy.mean(tdens[107:117]))),
                   numpy.fabs(ppyr - data_ppyr)[_BISPECIND],
                   numpy.fabs(ppyi - data_ppyi)[_BISPECIND],
                   ee)
def test_covariances_cross_spectrum(rndstate):
    n_channels, n_times = 3, 1000
    x = rndstate.randn(n_channels, n_times)
    cross_spectrum(x)
    cross_spectrum(x, fs=128, fmin=2, fmax=40)
    cross_spectrum(x, fs=129, window=37)
    with pytest.raises(ValueError):  # window < 1
        cross_spectrum(x, window=0)
    with pytest.raises(ValueError):  # overlap <= 0
        cross_spectrum(x, overlap=0)
    with pytest.raises(ValueError):  # overlap >= 1
        cross_spectrum(x, overlap=1)
    with pytest.raises(ValueError):  # fmin > fmax
        cross_spectrum(x, fs=128, fmin=20, fmax=10)
    with pytest.raises(ValueError):  # fmax > fs/2
        cross_spectrum(x, fs=128, fmin=20, fmax=65)
    with pytest.warns(UserWarning):  # fs is None
        cross_spectrum(x, fmin=12)
    with pytest.warns(UserWarning):  # fs is None
        cross_spectrum(x, fmax=12)

    c, freqs = cross_spectrum(x, fs=128, window=256)
    assert c.shape[0] == c.shape[1] == n_channels
    assert c.shape[-1] == freqs.shape[0]
    # test if cross-spectra are hermitian,
    # ie with symmetric real parts and skew-symmetric imag parts
    assert is_hermitian(np.transpose(c, (2, 0, 1)))
    # test if DC bins are real (always true)
    assert is_real(c[..., 0])
    # test if Nyquist bins are real (true when window is even)
    assert is_real(c[..., -1])
    # test if auto-spectra are real
    assert is_real(c.diagonal())

    # test equivalence between pyriemann and scipy for (auto-)spectra
    x = rndstate.randn(5, n_times)
    fs, window, overlap = 128, 256, 0.75
    spect_pr, freqs_pr = cross_spectrum(x, fs=fs, window=window,
                                        overlap=overlap)
    spect_pr = np.diagonal(spect_pr.real).T  # auto-spectra on diagonal
    spect_pr = spect_pr / np.linalg.norm(spect_pr)  # unit norm
    freqs_sp, spect_sp = welch(x, fs=fs, nperseg=window,
                               noverlap=int(overlap * window),
                               window=np.hanning(window), detrend=False,
                               scaling='spectrum')
    spect_sp /= np.linalg.norm(spect_sp)  # unit norm
    # compare frequencies
    assert_array_almost_equal(freqs_pr, freqs_sp, 6)
    # compare auto-spectra
    assert_array_almost_equal(spect_pr, spect_sp, 6)

    # test equivalence between pyriemann and scipy for cross-spectra
    x = rndstate.randn(2, n_times)
    fs, window, overlap = 64, 128, 0.5
    cross_pr, freqs_pr = cross_spectrum(x, fs=fs, window=window,
                                        overlap=overlap)
    cross_pr = cross_pr[0, 1] / np.linalg.norm(cross_pr[0, 1])  # unit norm
    freqs_sp, cross_sp = csd(x[0], x[1], fs=fs, nperseg=window,
                             noverlap=int(overlap * window),
                             window=np.hanning(window), detrend=False,
                             scaling='spectrum')
    cross_sp /= np.linalg.norm(cross_sp)  # unit norm
    # compare frequencies
    assert_array_almost_equal(freqs_pr, freqs_sp, 6)
    # compare cross-spectra
    assert_array_almost_equal(cross_pr, cross_sp, 6)
def exampleTrial():
    # ****************************************
    # ******* Simulation settings and variables
    # ****************************************
    # Simulation
    duration = 11000
    tmin = 1000
    dt = 0.05
    simTypes = ['dc', 'rc']
    fs = 1 / (dt * 1e-3)
    gsyn = {'dc': [], 'rc': []}
    vm = {'dc': [], 'rc': []}

    # Files and paths
    figsFolder = '/home/pablo/git/master-thesis/figuras/'
    dataPathVm = '/home/pablo/osf/Master-Thesis-Data/population/psd/cancel/trial2/trial1'
    dataPathGs = '/home/pablo/osf/Master-Thesis-Data/population/psd/cancel/trial1/trial1'

    # ****************************************
    # ******* Running simulation for each case
    # ****************************************
    for j, simType in enumerate(simTypes):
        # Variables calculated
        gsynSOL = []
        vmSOL = []
        taux = []
        # Inputs plots
        labels = {'rc': 'Inhibitory inputs', 'dc': 'Excitatory inputs'}
        symbols = {'rc': 'k', 'dc': 'k--'}

        # ****************************************
        # ******* Getting and processing for analysis
        # ****************************************
        fileName = dataPathGs + '/gsyn' + simType + '.dat'
        f = open(fileName, 'r')
        lines = f.readlines()
        for line in lines:
            taux.append(float(line.split()[0]))
            gsynSOL.append(float(line.split()[2]))
        f.close()
        fileName = dataPathVm + '/gsyn' + simType + '.dat'
        f = open(fileName, 'r')
        lines = f.readlines()
        for line in lines:
            vmSOL.append(float(line.split()[2]))
        f.close()

        # ****************************************
        # ******* Gathering data for later use
        # ****************************************
        staticInputG = [y for x, y in enumerate(gsynSOL) if taux[x] > tmin]
        staticInputV = [y for x, y in enumerate(vmSOL) if taux[x] > tmin]
        t = [y for x, y in enumerate(taux) if taux[x] > tmin]
        gsyn[simType] = staticInputG
        vm[simType] = staticInputV

    # ****************************************
    # ******* Computing PSD and coherences
    # ****************************************
    fr = 1
    nperseg = 4 * fs / 2 / fr
    noverlap = None
    nfft = None  # 8*nperseg
    detrend = False
    scale = 'spectrum'

    # Gsyn PSD plots
    fig, ax1 = plt.subplots()
    for simType in simTypes:
        # Plot inputs PSD
        ff, PSD = signal.welch(gsyn[simType], fs, 'hann', nperseg, noverlap,
                               nfft, detrend, scaling=scale)
        ax1.plot(ff, 1e6 * PSD, symbols[simType])
    ax1.set_ylim([0, 700])
    ax1.set_xlim([0, 50])
    ax1.set_xlabel('Frequency (Hz)')
    ax1.set_ylabel('Power spectral density of force (N$^2$)')
    # creating inset
    axInset = zoomed_inset_axes(ax1, 8, loc=10)
    axInset.plot(ff, 1e6 * PSD, symbols[simType])
    mark_inset(ax1, axInset, loc1=4, loc2=3, fc="none", ec="0.5")
    axInset.set_xlim(9, 11)
    axInset.set_ylim(0, 15)
    plt.savefig(figsFolder + 'res_gsynpsdex' + '.svg', format='svg')
    #plt.show()

    # Vm PSD plots
    fig, ax2 = plt.subplots()
    for simType in simTypes:
        # Plot inputs PSD
        ff, PSD = signal.welch(vm[simType], fs, 'hann', nperseg, noverlap,
                               nfft, detrend, scaling=scale)
        plt.plot(ff, PSD, symbols[simType])
    ax2.set_ylim([0, .5])
    ax2.set_xlim([0, 50])
    ax2.set_xlabel('Frequency (Hz)')
    ax2.set_ylabel('Power spectral density of force (N$^2$)')
    # creating inset
    axInset = zoomed_inset_axes(ax2, 8, loc=10)
    axInset.plot(ff, PSD, symbols[simType])
    mark_inset(ax2, axInset, loc1=4, loc2=3, fc="none", ec="0.5")
    axInset.set_xlim(9, 11)
    axInset.set_ylim(0, 0.003)
    plt.savefig(figsFolder + 'res_vmpsdex' + '.svg', format='svg')
    #plt.show()

    # Plot coherence between inputs (gsyn only)
    plt.figure()
    nperseg = 15000
    fc, coherence = signal.coherence(gsyn['rc'], gsyn['dc'], fs, 'hann',
                                     nperseg, noverlap, nfft, detrend)
    plt.plot(fc, coherence, symbols[simType])
    plt.xlabel('Frequency (Hz)')
    plt.ylabel('Magnitude of corticomuscular coherence')
    plt.grid()
    plt.xlim([0, 50])
    plt.savefig(figsFolder + 'res_gsyncohex' + '.svg', format='svg')
    #plt.show()

    # Cross spectral density used to study the coherence phase
    _, crossSpectrum = signal.csd(gsyn['rc'], gsyn['dc'], fs, 'hann',
                                  nperseg, noverlap, nfft, detrend)

    # Cross spectral density plot
    fig, ax4 = plt.subplots()
    ax4.plot(fc, np.angle(crossSpectrum, deg=False), 'k')
    ax4.set_xlabel('Frequency (Hz)')
    ax4.set_ylabel('Phase of corticomuscular coherence (rad)')
    ax4.grid()
    ax4.set_xlim([0, 50])
    # setting the y ticks to multiples of pi
    fc_tick = np.arange(-1, 2)
    y_label = [r'$-\pi$', '0', r'$\pi$']
    ax4.set_yticks(fc_tick * np.pi)
    ax4.set_yticklabels(y_label)
    plt.savefig(figsFolder + 'res_gsyncsdex' + '.svg', format='svg')
def getData(self):
    #{{{docstring
    """
    Makes a DataFrame of the phase shifts obtained from the simulations.

    Returns
    -------
    analyticalPhaseShiftDataFrame : DataFrame
        DataFrame consisting of the variable (measured property)
        "phaseShift" over the observations "Scan" and "modeNr".
    phaseShiftDataFrame : DataFrame
        DataFrame consisting of the variable (measured property)
        "phaseShift" over the observation "Scan".
    positionTuple : tuple
        The tuple containing (rho, theta, z).
        Needed in the plotting routine.
    uc : Units Converter
        The units converter used when obtaining the fourier modes.
        Needed in the plotting routine.
    """
    #}}}

    # Guard
    if len(self._notCalled) > 0:
        message = "The following functions were not called:\n{}".\
                    format("\n".join(self._notCalled))
        raise RuntimeError(message)

    # Collect the analytic phase shift
    # Create collect object
    ccagr = CollectAndCalcAnalyticGrowthRates(self._steadyStatePaths,\
                                              self._scanParameter,\
                                              self._yInd)
    # Obtain the data
    analyticalGRDataFrame, _, _, uc =\
        ccagr.getData()

    # Recast the data frame
    analyticalGRDataFrame.drop("growthRate", axis=1, inplace=True)
    analyticalGRDataFrame.drop("angularFrequency", axis=1, inplace=True)
    analyticalPhaseShiftDataFrame =\
        analyticalGRDataFrame.\
        rename(columns={"phaseShiftNPhi":"phaseShift"})

    # Get the levels correct
    analyticalPhaseShiftDataFrame =\
        analyticalPhaseShiftDataFrame.swaplevel()

    # Prepare the data frame of the simulated phase shifts
    dataFrameDict = {"phaseShift": []}
    scanValues = []

    # Collect the phase shift from the simulations
    loopOver = zip(self._scanCollectPaths,\
                   self._steadyStatePaths,\
                   self._tSlices)

    # Loop over the folders
    for scanPaths, steadyStatePath, tSlice in loopOver:
        # Obtain the scan value
        scanValue = getScanValue(scanPaths, self._scanParameter)

        # Update with the correct tSlice
        self._indicesKwargs.update({"tSlice": tSlice})

        # Obtain the time traces
        n, phi, positionTuple = self._getTimeTraces(scanPaths)

        # Obtain the cross spectral density
        # NOTE: If nperseg is below the number of samples, a smoothing
        #       will occur
        nperseg = len(n)
        # NOTE: The triangular window corresponds to the periodogram
        #       estimate of the spectral density
        # NOTE: The first output (frequency) is not used
        _, csd = signal.csd(n, phi, window="triang", nperseg=nperseg)

        maxInd = self._getMaxIndOfMagnitude(csd)

        # FIXME: Not sure why, but there seems to be a sign error
        avgPhaseShiftNPhi = -np.angle(csd[maxInd])

        scanValues.append(scanValue)
        dataFrameDict["phaseShift"].append(avgPhaseShiftNPhi)

    # Make the data frame
    phaseShiftDataFrame = pd.DataFrame(dataFrameDict, index=scanValues)
    phaseShiftDataFrame.index.name = self._scanParameter

    return analyticalPhaseShiftDataFrame,\
           phaseShiftDataFrame,\
           positionTuple,\
           uc
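# A stripped-down sketch of the phase-shift extraction used in getData above,
# with hypothetical stand-in signals rather than the simulation's n and phi
# time traces: take the CSD with a triangular window spanning the whole
# record, locate the bin of maximum magnitude, and read the phase there.
import numpy as np
from scipy import signal

t = np.linspace(0, 1, 2000, endpoint=False)
n = np.sin(2 * np.pi * 50 * t)            # stand-in density trace
phi = np.sin(2 * np.pi * 50 * t + 0.7)    # leads n by 0.7 rad
_, csdNPhi = signal.csd(n, phi, window="triang", nperseg=len(n))
maxInd = np.argmax(np.abs(csdNPhi))
# scipy's csd(n, phi) averages conj(N)*Phi, so the angle at the peak is the
# amount phi leads n; getData applies an extra sign flip (see its FIXME)
print(np.angle(csdNPhi[maxInd]))          # approximately +0.7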
def cpu_version(self, cpu_x, cpu_y, fs, nperseg):
    return signal.csd(cpu_x, cpu_y, fs, nperseg=nperseg)
def plot_cross_correlation_locations(
    cases = [],
    case_names = [],
    root = '.',
    x_locs = [0],
    y_locs = [0],  # Delta normalized
    component = 'vy',
    plot_name = 'Correlation_test.png',
    presentation = True,
    test = False,
    straight_only_at_TE = True
):
    """ Takes the cases, and plots the cross-correlation for the requested
    locations, each location on a new figure

    Input:
        cases: case names to compare to file names
        root: where to find the pickled point time series
        x_locs: the streamwise locations to plot
        y_locs: the wall-normal locations to plot
        component: the velocity component PSD to plot
        plot_name

    Output:
        Figure
    """
    import matplotlib.pyplot as plt
    import pandas as pd
    from numpy import argmin, array, abs, arctan, sqrt, exp, linspace
    from numpy.random import rand
    from scipy.signal import csd
    import os
    from math import pi
    import matplotlib as mpl
    from matplotlib import rc

    if presentation:
        rc('font', family='sans-serif', serif='sans-serif')
        mpl.rcParams['text.latex.preamble'] = [
            r'\usepackage{siunitx}',
            r'\sisetup{detect-all}',
            r'\usepackage{sansmath}',
            r'\sansmath'
        ]

    if not len(case_names):
        for c in cases:
            case_names.append(c.replace("_", '-'))

    freq_lower_limit = 300
    eta = 0.22  # coherence decay rate; defined up front so the annotation
                # at the end cannot hit a NameError when no data matched

    def remove_angle_jumps(df):
        from numpy import sign
        df.loc[df.Phi < 0, 'Phi'] += pi
        for ix in range(len(df) - 2):
            dif = df.Phi.iloc[ix + 1] - df.Phi.iloc[ix]
            if abs(dif) > pi * 0.4:
                df.loc[ix + 1, 'Phi'] = df.Phi.iloc[ix + 1] - sign(dif) * pi
        df.loc[df.Phi < 0, 'Phi'] += pi
        df.loc[df.f == df.f.max(), 'Phi'] += 2 * pi
        return df

    def calculate_Uc(df, delta_x):
        from scipy.stats import linregress

        df = df.sort_values('f', ascending=True).reset_index(drop=True)
        r_value = 0
        consider = len(df)
        while r_value**2 < 0.99:
            df = df.loc[:consider].reset_index(drop=True)
            slope, intercept, r_value, p_value, std_err = linregress(
                df.Phi, df.f
            )
            consider -= 1
        # phi = 2*pi*f*delta_x/Uc for a convecting disturbance, so the
        # slope df/dphi gives Uc = 2*pi*slope*delta_x (delta_x is in mm)
        Uc = 2 * pi * slope * delta_x / 1000.
        return Uc, df, intercept, slope

    fig_Uc, axes_Uc = plt.subplots(
        len(x_locs), len(y_locs), figsize=(10, 10),
        sharex=True, sharey=True
    )
    fig_Phi, axes_Phi = plt.subplots(
        len(x_locs), len(y_locs), figsize=(10, 10),
        sharex=True, sharey=True
    )
    fig_Coh, axes_Coh = plt.subplots(
        len(x_locs), len(y_locs), figsize=(10, 10),
        sharex=True, sharey=True
    )

    step = 1
    tooth_length = 40.

    for case_name, c_cnt, case_label, marker \
            in zip(cases, range(len(cases)), case_names,
                   markers_full[:len(cases)]):

        if 'a0' in case_name:
            delta = 9.6 / 1000.
        elif 'a12' in case_name:
            delta = 13.7 / 1000.
        else:
            delta = 0

        # Build the data frame from pickled data if it's not provided
        print(" Loading {0}".format(case_name))
        case_df = pd.read_hdf(
            os.path.join(root, case_name + "_WallNormalData.hdf5"),
            case_name
        )

        # Normalize the y coordinates to the boundary layer size
        case_df.y = case_df.y * tooth_length / (delta * 1000)

        # Get the available coordinates
        df_x_coords = array(sorted(case_df.x.unique(), reverse=False))
        available_x_locs = []
        available_x_neighbors = []
        for x in x_locs:
            if "STE" in case_name and straight_only_at_TE:
                x = min(x_locs)
            x_av, dx = find_nearest(x, df_x_coords)
            neighbor_index = argmin(abs(df_x_coords - x_av)) + step
            if neighbor_index < len(df_x_coords):
                available_x_neighbors.append(df_x_coords[neighbor_index])
                available_x_locs.append(x_av)

        if len(available_x_locs) and len(available_x_neighbors):
            for x_l, x_n, xi in zip(
                    available_x_locs,
                    available_x_neighbors,
                    range(len(available_x_locs))
            ):
                df_y_coords = case_df[case_df.x == x_l].y.unique()
                df_y_coords_neighbor = case_df[case_df.x == x_n].y.unique()

                for y_l, yi in zip(y_locs, range(len(y_locs))):
                    y_av, yd = find_nearest(y_l, df_y_coords)
                    y_n, yd = find_nearest(y_l, df_y_coords_neighbor)

                    plt_idx = len(y_locs) - yi - 1

                    time_series = case_df[
                        (case_df.x == x_l) & (case_df.y == y_av)
                    ].sort_values('ti').reset_index(drop=True)
                    time_series_neighbor = case_df[
                        (case_df.x == x_n) & (case_df.y == y_n)
                    ].sort_values('ti').reset_index(drop=True)

                    non_null_time_series = time_series[
                        time_series.vx.notnull()
                    ]
                    non_null_time_series_neighbor = time_series_neighbor[
                        time_series_neighbor.vx.notnull()
                    ]

                    text = axes_Phi[plt_idx][xi].text(
                        x = 0.90,
                        y = 0.10,
                        s = "$x/2h = {0:.1f}$, $y/\\lambda = {1:.1f}$"\
                            .format(x_l, y_l),
                        ha = 'right',
                        transform = axes_Phi[plt_idx][xi].transAxes,
                        zorder = 10
                    )
                    text.set_bbox(dict(color='white', alpha=0.5))
                    text = axes_Coh[plt_idx][xi].text(
                        x = 0.90,
                        y = 0.10,
                        s = "$x/2h = {0:.1f}$, $y/\\lambda = {1:.1f}$"\
                            .format(x_l, y_l),
                        ha = 'right',
                        transform = axes_Coh[plt_idx][xi].transAxes,
                        zorder = 10
                    )
                    text.set_bbox(dict(color='white', alpha=0.5))
                    text = axes_Uc[plt_idx][xi].text(
                        x = 0.10,
                        y = 0.10,
                        s = "$x/2h = {0:.1f}$, $y/\\lambda = {1:.1f}$"\
                            .format(x_l, y_l),
                        transform = axes_Uc[plt_idx][xi].transAxes,
                        zorder = 10
                    )
                    text.set_bbox(dict(color='white', alpha=0.5))

                    if not non_null_time_series.empty \
                            and not non_null_time_series_neighbor.empty \
                            and len(non_null_time_series_neighbor) == \
                            len(non_null_time_series):

                        max_lag = 10000
                        s1 = non_null_time_series[component]\
                            .values[0:max_lag] \
                            - non_null_time_series[component]\
                            .values[0:max_lag].mean()
                        s2 = non_null_time_series_neighbor[component]\
                            .values[0:max_lag] \
                            - non_null_time_series_neighbor[component]\
                            .values[0:max_lag].mean()

                        if test:
                            s1 = rand(max_lag)
                            s2 = rand(max_lag)

                        f, Pxy = csd(s2, s1, nperseg=2**6, fs=10000)
                        f, Pxx = csd(s1, s1, nperseg=2**6, fs=10000)
                        f, Pyy = csd(s2, s2, nperseg=2**6, fs=10000)

                        gamma_squared = abs(Pxy)**2 / (Pxx * Pyy)
                        gamma = sqrt(gamma_squared)

                        Phi = arctan(Pxy.imag / Pxy.real)

                        df = pd.DataFrame(data={
                            'Phi': Phi,
                            'f': f,
                            'gamma': gamma
                        })
                        df = df[df.f >= freq_lower_limit].reset_index(
                            drop=True
                        )
                        df = remove_angle_jumps(df)
                        df = remove_angle_jumps(df)

                        line = axes_Phi[plt_idx][xi].plot(
                            get_Strouhal(df.f, delta, U),
                            df.Phi,
                            alpha = 0.3,
                        )
                        axes_Coh[plt_idx][xi].plot(
                            linspace(0, 2 * pi, 30),
                            exp(-eta * linspace(0, 2 * pi, 30)),
                            '--',
                            color = 'k',
                        )
                        axes_Coh[plt_idx][xi].scatter(
                            df.Phi,
                            df.gamma,
                            color = line[0].get_color(),
                            alpha = 0.3,
                            marker = marker
                        )

                        Uc, df, intercept, slope = calculate_Uc(
                            df, delta_x = abs(x_n - x_l) * tooth_length
                        )

                        df['Strouhal'] = get_Strouhal(df.f, delta, U)

                        axes_Coh[plt_idx][xi].scatter(
                            df.Phi,
                            df.gamma,
                            color = line[0].get_color(),
                            label = case_label,
                            marker = marker
                        )
                        axes_Phi[plt_idx][xi].plot(
                            df.Strouhal,
                            df.Phi,
                            color = line[0].get_color(),
                            label = case_label
                        )
                        if df.f.max() > 1000:
                            axes_Phi[plt_idx][xi].plot(
                                df.Strouhal,
                                df.f * slope**(-1),
                                '--',
                                color = line[0].get_color(),
                            )

                        bar_width = 1.
                        axes_Uc[plt_idx][xi].bar(
                            x = c_cnt + bar_width / 2.5,
                            width = bar_width * 0.8,
                            color = line[0].get_color(),
                            height = Uc / 20.,
                            label = case_label
                        )

    for axi in axes_Phi:
        for ax in axi:
            ax.set_yticks(array(
                [0, 1/4., 1/2., 3/4., 1, 5/4., 3/2., 7/4., 2]
            ) * pi)
            ax.set_yticklabels(
                ['$0$', '$\\pi/4$', '$\\pi/2$', '$3\\pi/4$', '$\\pi$',
                 '$5\\pi/4$', '$3\\pi/2$', '$7\\pi/4$', '$2\\pi$']
            )
            ax.set_xlim(St_min, St_max)
            ax.set_ylim(0, 2 * pi)
            ax.set_xlabel("")
            ax.set_ylabel("")
    for axi in axes_Coh:
        for ax in axi:
            ax.set_xticks(array([0, 1/2., 1, 3/2., 2]) * pi)
            ax.set_xticklabels(
                ['$0$', '$\\pi/2$', '$\\pi$', '$3\\pi/2$', '$2\\pi$']
            )
            ax.set_ylim(0, 1)
            ax.set_xlim(0, 2 * pi)
            ax.set_xlabel("")
            ax.set_ylabel("")
    for axi in axes_Uc:
        for ax in axi:
            ax.set_xticks(range(len(cases)))
            ax.set_xticklabels([''] * len(cases))
            ax.set_ylim(0.2, 1.2)
            ax.set_xlabel("")
            ax.set_ylabel("")

    axes_Phi[len(x_locs) - 1][0].set_xlabel("$\\textrm{{St}}_\\delta$")
    axes_Phi[len(x_locs) - 1][0].set_ylabel(
        "$\\phi_{{x,x+\\Delta x}},\\, {0}$ [rad]"\
        .format(component_dict[component])
    )
    axes_Coh[len(x_locs) - 1][0].set_xlabel("$\\phi = \\mu_{x0}\\Delta x$")
    axes_Coh[len(x_locs) - 1][0].set_ylabel("$\\gamma$")
    axes_Uc[len(x_locs) - 1][0].set_xlabel("$\\textrm{{St}}_\\delta$")
    axes_Uc[len(x_locs) - 1][0].set_ylabel("$U_{c}/U_{\\infty}$")

    axes_Phi[0][0].legend(
        bbox_to_anchor = (0., 1.02, len(x_locs), .102),
        loc = 3,
        ncol = 2,
        mode = "expand",
        borderaxespad = 0.
    )
    axes_Coh[0][0].legend(
        bbox_to_anchor = (0., 1.02, len(x_locs), .102),
        loc = 3,
        ncol = 2,
        mode = "expand",
        borderaxespad = 0.
    )
    axes_Uc[0][0].legend(
        bbox_to_anchor = (0., 1.02, len(x_locs), .102),
        loc = 3,
        ncol = 2,
        mode = "expand",
        borderaxespad = 0.
    )

    axes_Coh[0][0].annotate(
        "$\\textrm{exp}\\left(-\\eta\\phi\\right)$",
        xy = (pi / 2., exp(-eta * pi / 2.)),
        xycoords = 'data',
        xytext = (pi / 2. + pi / 2., exp(-eta * pi / 2.) + 0.1),
        textcoords = 'data',
        size = 15,
        arrowprops = dict(
            arrowstyle = 'simple',
            fc = "k",
            ec = "w",
            connectionstyle = "arc3,rad=0.3",
        ),
    )

    fig_Phi.savefig(
        plot_name.replace('.png', '_PhaseSpectra.png'),
        bbox_inches='tight'
    )
    fig_Coh.savefig(
        plot_name.replace('.png', '_Coherence.png'),
        bbox_inches='tight'
    )
    fig_Uc.savefig(
        plot_name.replace('.png', '_ConvectionVelocity.png'),
        bbox_inches='tight'
    )
    return 0
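# The convection velocity in calculate_Uc above comes from the slope of the
# cross-spectrum phase: a disturbance convecting at Uc between stations
# delta_x apart produces phi(f) = 2*pi*f*delta_x/Uc, so regressing f on phi
# gives a slope of Uc/(2*pi*delta_x). A synthetic check with hypothetical
# numbers (not the airfoil data):
import numpy as np
from scipy.stats import linregress

Uc_true = 12.0                      # m/s
delta_x = 0.004                     # 4 mm station spacing, in metres
f = np.linspace(300, 3000, 50)      # Hz, above the freq_lower_limit used above
phi = 2 * np.pi * f * delta_x / Uc_true
slope = linregress(phi, f).slope    # df/dphi
print(2 * np.pi * slope * delta_x)  # recovers ~12.0 m/s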
def tfe_sig(y, x, *args, **kwargs):
    """Estimate the transfer function from x to y; see csd for the calling
    convention.

    NOTE: scipy.signal.csd conjugates its first argument, so csd(y, x)
    averages conj(Y)*X and the returned estimate is the complex conjugate
    of the usual H1 = Sxy/Sxx; the magnitude is unaffected.
    """
    fxy, sxy = sig.csd(y, x, *args, **kwargs)
    fxx, sxx = sig.csd(x, x, *args, **kwargs)
    return sxy / sxx, fxx
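# A quick check of tfe_sig with a known system (illustrative, hypothetical
# data): drive a known FIR filter with white noise and compare the estimated
# magnitude response against the exact one from scipy.signal.freqz. Only the
# magnitude is compared, since the phase sign depends on the csd argument
# order noted above.
import numpy as np
import scipy.signal as sig

rng = np.random.default_rng(0)
x = rng.standard_normal(1 << 16)
b = np.array([1.0, -0.5, 0.25])            # known FIR filter
y = sig.lfilter(b, 1.0, x)
H, f = tfe_sig(y, x, fs=1.0, nperseg=1024)
_, H_exact = sig.freqz(b, worN=f, fs=1.0)
print(np.max(np.abs(np.abs(H) - np.abs(H_exact))))  # small: estimate tracks the filter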
def cohe(s1, s2, sr):
    # Magnitude-squared coherence: |Pxy|^2 / (Pxx * Pyy)
    f, Pxy = sg.csd(s1, s2, sr)
    f, Pxx = sg.welch(s1, sr)
    f, Pyy = sg.welch(s2, sr)
    coh = (np.abs(Pxy)**2) / (Pxx * Pyy)
    return f, coh
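# cohe computes the magnitude-squared coherence from its own csd and welch
# calls; scipy.signal.coherence evaluates the same ratio internally, so the
# two agree when given identical parameters. A quick consistency check with
# made-up signals:
import numpy as np
from scipy import signal as sg

rng = np.random.default_rng(1)
s1 = rng.standard_normal(4096)
s2 = s1 + rng.standard_normal(4096)        # partially coherent with s1
f, coh = cohe(s1, s2, sr=100.0)
f_ref, coh_ref = sg.coherence(s1, s2, fs=100.0)
print(np.allclose(coh, coh_ref))           # True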
def residual_spectrum(xres, fu, dt):
    """RESIDUAL_SPECTRUM: Computes statistics from an input spectrum over
    a number of bands, returning the band limits and the estimates for
    power spectra for real and imaginary parts and the cross-spectrum.

    Mean values of the noise spectrum are computed for the following nine
    frequency bands defined by their center frequency and band width:
    M0 +.1 cpd; M1 +-.2 cpd; M2 +-.2 cpd; M3 +-.2 cpd; M4 +-.2 cpd;
    M5 +-.2 cpd; M6 +-.21 cpd; M7 (.26-.29 cpd); and M8 (.30-.50 cpd).

    S. Lentz  10/28/99
    R. Pawlowicz 11/1/00
    Version 1.0
    """
    # Define frequency bands for spectral averaging.
    fband = np.array([[0.0001, 0.00417],
                      [0.03192, 0.04859],
                      [0.07218, 0.08884],
                      [0.11243, 0.1291],
                      [0.15269, 0.16936],
                      [0.19295, 0.20961],
                      [0.2332, 0.251],
                      [0.26, 0.29],
                      [0.3, 0.5]])

    # If we have a sampling interval > 1 hour, we might have to get
    # rid of some bins.
    # fband(fband(:,1)>1/(2*dt),:)=[];

    nfband = fband.shape[0]
    nx = max(xres.shape)

    # Spectral estimate (takes real time series only).
    fx, Pxr = sps.welch(np.real(xres), window=np.hanning(nx),
                        noverlap=int(np.ceil(nx / 2)), nfft=nx,
                        fs=1 / dt, nperseg=nx)
    Pxr = Pxr / 2 / dt
    fx, Pxi = sps.welch(np.imag(xres), window=np.hanning(nx),
                        noverlap=int(np.ceil(nx / 2)), nfft=nx,
                        fs=1 / dt, nperseg=nx)
    Pxi = Pxi / 2 / dt
    fx, Pxc = sps.csd(np.real(xres), np.imag(xres), fs=1 / dt,
                      nperseg=nx, nfft=nx)
    # matlab cpsd returns only reals when given a real xres; have to
    # test for complex and maybe change to an if statement
    Pxc = np.real(Pxc)
    Pxc = Pxc / 2 / dt

    df = fx[2] - fx[1]
    # Set Px=NaN in bins close to the analyzed frequencies
    # (to prevent leakage problems?).
    Pxr[np.around(fu / df).astype(int)] = np.nan
    Pxi[np.around(fu / df).astype(int)] = np.nan
    Pxc[np.around(fu / df).astype(int)] = np.nan

    Pxrave = np.zeros(shape=(nfband, 1), dtype='float64')
    Pxiave = np.zeros(shape=(nfband, 1), dtype='float64')
    Pxcave = np.zeros(shape=(nfband, 1), dtype='float64')

    # Loop downwards in frequency through bands (cures short time series
    # problem with no data in lowest band).
    # Divide by nx to get power per frequency bin, and multiply by 2
    # to account for positive and negative frequencies.
    for k in range(nfband - 1, -1, -1):
        jband = np.flatnonzero(
            np.all(np.vstack([fx >= fband[k, 0],
                              fx <= fband[k, 1],
                              np.isfinite(Pxr)]).T, axis=1))
        if jband.size > 0:
            Pxrave[k] = 2 * np.mean(Pxr[jband]) / nx
            Pxiave[k] = 2 * np.mean(Pxi[jband]) / nx
            Pxcave[k] = 2 * np.mean(Pxc[jband]) / nx
        elif k < nfband - 1:
            # Low frequency bin might not have any points: fall back to the
            # band above. The k < nfband - 1 guard excludes the highest
            # band, where k + 1 would index past the end of the array.
            Pxrave[k] = Pxrave[k + 1]
            Pxiave[k] = Pxiave[k + 1]
            Pxcave[k] = Pxcave[k + 1]

    return fband, Pxrave, Pxiave, Pxcave
def test_detrend_linear(self):
    x = np.arange(10, dtype=np.float64) + 0.04
    f, p = csd(x, x, nperseg=10, detrend='linear')
    assert_allclose(p, np.zeros_like(p), atol=1e-15)
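# An illustrative companion to the test above (not part of the test suite):
# detrend='linear' removes a pure ramp segment by segment, so its CSD is
# numerically zero, while the default detrend='constant' only removes each
# segment's mean and leaves the ramp's power in the spectrum.
import numpy as np
from scipy.signal import csd

x = np.linspace(0, 1, 1024)                # pure linear ramp
f, p_lin = csd(x, x, nperseg=256, detrend='linear')
f, p_con = csd(x, x, nperseg=256, detrend='constant')
print(np.abs(p_lin).max())                 # ~0
print(np.abs(p_con).max())                 # clearly nonzero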
def time_csd(self):
    signal.csd(self.x, self.y)