def cross_wavelet_transform(y1, y2, dt, dj=1 / 12, s0=-1, J=-1,
                            wavelet='morlet', normalize=True):
    wavelet = pycwt.wavelet._check_parameter_wavelet(wavelet)
    # Makes sure the input signals are numpy arrays.
    y1 = np.asarray(y1)
    y2 = np.asarray(y2)
    # Calculates the standard deviation of both input signals.
    std1 = y1.std()
    std2 = y2.std()
    # Normalizes both signals, if appropriate.
    if normalize:
        y1_normal = (y1 - y1.mean()) / std1
        y2_normal = (y2 - y2.mean()) / std2
    else:
        y1_normal = y1
        y2_normal = y2
    # Calculates the CWT of the time series, making sure the same parameters
    # are used in both calculations.
    _kwargs = dict(dj=dj, s0=s0, J=J, wavelet=wavelet)
    W1, sj, freq, coi, _, _ = pycwt.cwt(y1_normal, dt, **_kwargs)
    W2, sj, freq, coi, _, _ = pycwt.cwt(y2_normal, dt, **_kwargs)
    # Calculates the cross-CWT of y1 and y2.
    W12 = W1 * W2.conj()
    return W12, coi, freq
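# A minimal usage sketch for cross_wavelet_transform above. Assumes
# `import numpy as np` and `import pycwt` (as the function itself does);
# the sampling interval and test signals are illustrative, not from the
# source.
_t = np.arange(512) * 0.25
_s1 = np.sin(2 * np.pi * _t / 8) + 0.1 * np.random.randn(_t.size)
_s2 = np.sin(2 * np.pi * _t / 8 + 0.5) + 0.1 * np.random.randn(_t.size)
W12, coi, freq = cross_wavelet_transform(_s1, _s2, dt=0.25)
cross_power = np.abs(W12) ** 2  # Cross-wavelet power
phase = np.angle(W12)           # Relative phase between the two signals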
def get_lf0_le_cwt(lf0, le):
    mother = wavelet.MexicanHat()
    dt = 0.005
    dj = 1
    s0 = dt * 2
    J = 9
    C_delta = 3.541
    Wavelet_lf0, scales, _, _, _, _ = wavelet.cwt(np.squeeze(lf0), dt, dj,
                                                  s0, J, mother)
    Wavelet_le, scales, _, _, _, _ = wavelet.cwt(np.squeeze(le), dt, dj,
                                                 s0, J, mother)
    Wavelet_lf0 = np.real(Wavelet_lf0).T  # (T, D=10)
    Wavelet_le = np.real(Wavelet_le).T    # (T, D=10)
    lf0_le_cwt = np.concatenate((Wavelet_lf0, Wavelet_le), -1)  # (T, 20)
    return lf0_le_cwt
def cwt(self):
    r"""
    Compute the continuous wavelet transform.

    Returns
    -------
    bool
        True if successful, False otherwise.

    Attributes
    ----------
    wt : ndarray
        Real part of the wavelet coefficients.
    wtN : ndarray
        Real part of the wavelet coefficients, normalized as
        (wt - <wt>) / \sigma.
    """
    try:
        wt, sc, freqs, coi, fft, fftfreqs = wav.cwt(
            self.sig, self.dt, 0.25, self.scale, 0, self.mother)
        self.wt = np.real(np.squeeze(wt))
        self.wtN = (self.wt - self.wt.mean()) / self.wt.std()
        return True
    except BaseException:
        return False
def calculate_power_wavelet(rr_intervals, heart_rate=4, mother_wave='morlet'):
    """
    Calculate the spectral power using the wavelet method.

    Parameters
    ----------
    rr_intervals : array-like
        List of RR intervals (in ms).
    heart_rate : int
        Sampling rate in Hz; should be at least twice the highest
        heart-rate frequency of interest.
    mother_wave : str
        The mother wavelet used to transform the data. Available waves are
        'gaussian', 'paul' and 'mexican_hat'; any other value falls back to
        the Morlet wavelet.

    Returns
    -------
    freqs : list
        Frequencies of the corresponding PSD points.
    powers : list
        Power spectral density of the signal.
    """
    dt = 1 / heart_rate
    if mother_wave in mother_wave_dict.keys():
        mother_morlet = mother_wave_dict[mother_wave]
    else:
        mother_morlet = wavelet.Morlet()
    wave, scales, freqs, coi, fft, fftfreqs = \
        wavelet.cwt(rr_intervals, dt, wavelet=mother_morlet)
    powers = (np.abs(wave)) ** 2
    return freqs, powers
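# Hedged sketch: `mother_wave_dict` is referenced above but not defined in
# this snippet. One plausible mapping (an assumption, not the source's
# actual table) and a usage example with synthetic RR intervals:
mother_wave_dict = {
    'gaussian': wavelet.DOG(m=2),        # Assumption: DOG(2), Gaussian derivative
    'paul': wavelet.Paul(),
    'mexican_hat': wavelet.MexicanHat(),
}
rr_intervals = 1000 + 50 * np.random.randn(600)  # Synthetic RR series (ms)
freqs, powers = calculate_power_wavelet(rr_intervals, heart_rate=4,
                                        mother_wave='morlet')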
def plot_tfr(times, freqs, data, mother=None):
    '''
    Plots the time-frequency representation of an analog signal,
    together with an FFT-based PSD estimate.

    Parameters
    ----------
    times : array
    freqs : array
    data : array
    mother : wavelet
    '''
    import pycwt
    if mother is None:
        mother = pycwt.Morlet()
    sampling_period = times[1] - times[0]
    wave, scales, freqs, coi, fft, fftfreqs = pycwt.cwt(
        data, sampling_period, freqs=freqs, wavelet=mother)
    power = (numpy.abs(wave)) ** 2
    # Rectify the power spectrum according to the suggestions proposed by
    # Liu et al. (2007).
    power /= scales[:, None]
    fft_power = numpy.abs(fft) ** 2

    gs = gridspec.GridSpec(3, 3)
    ax_pow = plt.subplot(gs[:2, 1:3])
    ax_pow.set_xlim(*times[[0, -1]])
    ax_pow.set_ylim(*freqs[[0, -1]])
    ax_fft = plt.subplot(gs[:2, 0], sharey=ax_pow)
    ax_sig = plt.subplot(gs[2, 1:3], sharex=ax_pow)
    ax_pow.contourf(times, freqs, power, levels=100)
    ax_sig.plot(times, data)
    ax_fft.plot(fft_power, fftfreqs)
def calculate_cwt(self, memory_buffer, return_plot_info=False):
    coefs_array = []
    if not return_plot_info:
        row_count = self.scale_count
        col_count = memory_buffer.length
        coefs_array = np.ndarray(
            [row_count, col_count, memory_buffer.channel_count])
    channel_index = 0
    buffer = memory_buffer.get_buffer()
    for idx, x in enumerate(buffer):
        channel_name = memory_buffer.channels[idx]
        x = self.__normalize(x)
        coefs, scales, freqs, coi, fft, fftfreqs = pycwt.cwt(
            x, self.delta_t, wavelet=self.wavelet_type, freqs=self.freqs)
        coefs = self.__normalize(coefs.real)
        # "Clean" values away from the max/min:
        # coefs = np.power(coefs, 3)
        if not return_plot_info:
            for i in range(row_count):
                for j in range(col_count):
                    coefs_array[i][j][channel_index] = coefs[i][j]
            channel_index += 1
        else:
            coefs_array.append((channel_name, coefs, x, coi))
    return coefs_array
def wavel(signal, cadence):
    mother = 'morlet'
    sig_level = 0.95  # / signal.std()
    t1 = np.linspace(0, cadence * signal.size, signal.size)
    # pycwt.cwt returns six values; the original unpacked only four.
    wave, scales, freqs, coi, _, _ = wavelet.cwt(
        (signal - signal.mean()), cadence, wavelet=mother, dj=1 / 100.)
    power = (np.abs(wave)) ** 2  # / scales[:, None]
    period = 1 / freqs
    alpha = 0.0  # (variance=1 for the normalized SST)
    # The second argument of significance() is the sampling interval.
    signif, fft_theor = wavelet.significance(signal, cadence, scales, 0,
                                             alpha,
                                             significance_level=sig_level,
                                             wavelet=mother)
    sig95 = np.ones([1, signal.size]) * signif[:, None]
    sig95 = power / sig95
    # Index of the period closest to the maximum cone of influence.
    idx = find_closest(period, coi.max())
    # Convert to minutes.
    t1 /= 60
    period /= 60
    coi /= 60
    return wave, scales, sig95, idx, t1, coi, period, power
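# `find_closest` is called above but not defined in this snippet; a minimal
# sketch of the assumed helper (index of the array element nearest `value`):
def find_closest(array, value):
    return int(np.abs(np.asarray(array) - value).argmin())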
def remove_frequencies_and_save_to_csv(dtfrm, band_pass, high_pass,
                                       low_freq_limit, high_freq_limit,
                                       width, delta_t):
    [row, column] = dtfrm.shape
    frame = pd.DataFrame()
    i = 0
    while i < column:
        y = dtfrm.iloc[:, i].tolist()
        wave, scales, freqs, coi, fft, fftfreqs = wavelet.cwt(
            y, delta_t, J=width - 1, wavelet=u'mexicanhat')
        wave = select_frequencies(wave, width, band_pass, high_pass, delta_t,
                                  low_freq_limit, high_freq_limit)
        xrec = wavelet.icwt(wave, scales, delta_t, wavelet=u'mexicanhat')
        xrec = normalize(xrec)
        xrec = pd.Series(xrec)
        frame = pd.concat([frame, xrec], axis=1, ignore_index=True)
        i = i + 1
    print("Select a folder")
    # Prompts the user to select a folder.
    path = th.ui.getdir('Select a directory to save the csv file')
    frame.to_csv(path + "/" + "new_dtfrm.csv")
def cwt(self, rectify=False):
    """
    Run the continuous wavelet transform.

    Parameters
    ----------
    rectify : bool, optional
        Whether to rectify the wavelet power spectrum.

    Returns
    -------
    wavelet_spectra : WaveletSpectora
        Wraps the wavelet transform for the selected mother wavelet: the
        (J+1) x N coefficient matrix, the vector of scale indices given by
        sj = s0 * 2**(j * dj), j = {0, 1, ..., J}, the corresponding Fourier
        frequencies (in 1/time units), and the cone of influence, a vector
        of N points containing the maximum Fourier period of useful
        information at each particular time (periods greater than those are
        subject to edge effects).
    fourier_spectra : FourierSpectora
        Wraps the normalized fast Fourier transform of the input signal and
        the Fourier frequencies (in 1/time units) of the calculated FFT
        spectrum.

    Example
    -------
    wavelet_spectra, fourier_spectra = cwt.cwt()
    """
    wave, scales, freqs, coi, fft, fft_freqs = wavelet.cwt(
        self.dat_norm,
        self.feature.dt,
        self.feature.dj,
        self.feature.minimum_scale,
        self.feature.J,
        self.feature.mother,
        self.feature.freqs,
    )
    wavelet_spectra = WaveletSpectora(wave, scales, freqs, coi, rectify,
                                      self.feature)
    fourier_spectra = FourierSpectora(fft, fft_freqs)
    self.wavelet_spectra = wavelet_spectra
    self.fourier_spectra = fourier_spectra
    return (wavelet_spectra, fourier_spectra)
def calculateCWT(t, s, steps=32):
    mother = wavelet.Morlet(6)
    deltaT = t[1] - t[0]
    dj = 1 / steps   # Sub-octaves per octave
    s0 = 2 * deltaT  # Starting scale, twice the sampling interval
    wave, scales, freqs, coi, fft, fftfreqs = wavelet.cwt(
        s, deltaT, dj, s0, -1, mother)
    # Normalized wavelet power spectrum
    power = (np.abs(wave)) ** 2
    return power, scales, coi, freqs
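# Usage sketch for calculateCWT (synthetic series; values are illustrative):
_t = np.arange(0, 240) / 12.0  # 20 "years" of monthly samples
_s = np.sin(2 * np.pi * _t) + 0.2 * np.random.randn(_t.size)
power, scales, coi, freqs = calculateCWT(_t, _s, steps=32)
# `power` has shape (n_scales, len(_s)); `freqs` is in cycles per time unit.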
def get_lf0_cwt(lf0):
    mother = wavelet.MexicanHat()
    dt = 0.005
    dj = 0.015
    s0 = dt * 2
    J = 513 - 1
    Wavelet_lf0, scales, freqs, coi, fft, fftfreqs = wavelet.cwt(
        np.squeeze(lf0), dt, dj, s0, J, mother)
    Wavelet_lf0 = np.real(Wavelet_lf0).T
    return Wavelet_lf0, scales
def getCWT_auxInfoPlus(signal):
    [_, sj, freqs, coi, _, _] = pycwt.cwt(signal, TR, s0=s0, dj=dj, J=J,
                                          wavelet=mother)
    return sj, freqs, coi, mother, s0, dj, J, s1, TR
def getCWT_coeff(signal):
    [coefficients, _, _, _, _, _] = pycwt.cwt(signal, TR, s0=s0, dj=dj, J=J,
                                              wavelet=mother)
    return coefficients
def xwt(trace_ref, trace_current, fs, ns, nt, vpo, freqmin, freqmax,
        nptsfreq):
    # Choosing a Morlet wavelet with a central frequency w0 = 6
    mother = wavelet.Morlet(6.)
    # nx is the number of elements in the trace_current array
    nx = np.size(trace_current)
    x_reference = np.transpose(trace_ref)
    x_current = np.transpose(trace_current)
    # Sampling interval
    dt = 1 / fs
    # Spacing between discrete scales; the default value is 1/12
    dj = 1 / vpo
    # Number of scales less one; -1 refers to the default value,
    # J = (log2(N * dt / s0)) / dj
    J = -1
    # Smallest scale of the wavelet; the default value is 2 * dt
    s0 = 2 * dt
    # Frequency vector used in the continuous wavelet transform
    freqlim = np.linspace(freqmax, freqmin, num=nptsfreq, endpoint=True,
                          retstep=False, dtype=None, axis=0)
    # Calculation of the two wavelet transforms independently.
    # Scales are calculated from the wavelet Fourier wavelength.
    # fft: normalized fast Fourier transform of the input trace.
    # fftfreqs: Fourier frequencies for the calculated FFT spectrum.
    cwt_reference, scales, freqs, coi, fft, fftfreqs = wavelet.cwt(
        x_reference, dt, dj, s0, J, mother, freqs=freqlim)
    cwt_current, _, _, _, _, _ = wavelet.cwt(x_current, dt, dj, s0, J,
                                             mother, freqs=freqlim)
    scales = np.array([[kk] for kk in scales])
    invscales = np.kron(np.ones((1, nx)), 1 / scales)
    cfs1 = smoothCFS(invscales * abs(cwt_reference) ** 2, scales, dt, ns, nt)
    cfs2 = smoothCFS(invscales * abs(cwt_current) ** 2, scales, dt, ns, nt)
    crossCFS = cwt_reference * np.conj(cwt_current)
    WXamp = abs(crossCFS)
    # Cross-wavelet transform operation with smoothing
    crossCFS = smoothCFS(invscales * crossCFS, scales, dt, ns, nt)
    WXspec = crossCFS / (np.sqrt(cfs1) * np.sqrt(cfs2))
    WXangle = np.angle(WXspec)
    Wcoh = abs(crossCFS) ** 2 / (cfs1 * cfs2)
    pp = 2 * np.pi * freqs
    pp2 = np.array([[kk] for kk in pp])
    WXdt = WXangle / np.kron(np.ones((1, nx)), pp2)
    return WXamp, WXspec, WXangle, Wcoh, WXdt, freqs, coi
def getCWT(signal):
    [coefficients, _, _, _, _, _] = pycwt.cwt(signal, TR, s0=s0, dj=dj, J=J,
                                              wavelet=mother)
    power = np.absolute(coefficients)
    return power
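# The getCWT* helpers above close over module-level parameters that this
# snippet does not show. A hedged sketch of plausible definitions (the
# actual values, and the extra `s1` global returned by getCWT_auxInfoPlus,
# are unknown):
TR = 2.0      # Assumption: sampling interval, e.g. an fMRI TR in seconds
dj = 1 / 12   # Assumption: twelve sub-octaves per octave
s0 = 2 * TR   # Assumption: smallest resolvable scale
J = -1        # Assumption: let pycwt pick the number of scales
mother = pycwt.Morlet(6)
power = getCWT(np.random.randn(200))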
def get_lf0_cwt(lf0):
    mother = wavelet.MexicanHat()
    dt = 0.005
    dj = 1
    s0 = dt * 2
    J = 9
    Wavelet_lf0, scales, freqs, coi, fft, fftfreqs = wavelet.cwt(
        np.squeeze(lf0), dt, dj, s0, J, mother)
    Wavelet_lf0 = np.real(Wavelet_lf0).T  # Take the real part and transpose
    return Wavelet_lf0, scales
def noise_estimate(indata, f_h, f_l):
    nt = indata.size
    tt = np.arange(0, nt) / Fs
    pf = np.polyfit(tt, indata, 1)
    indata_norm = indata - np.polyval(pf, tt)
    i_wave, i_scales, i_freqs, i_coi, i_fft, i_fftfreqs = wavelet.cwt(
        indata_norm, 1 / Fs, dj, s0, J, mother)
    i_power = (np.abs(i_wave)) ** 2
    i_period = 1 / i_freqs
    # Select the frequency band for averaging.
    i_sel = find((i_period >= 1 / f_h) & (i_period < 1 / f_l))
    i_Cdelta = mother.cdelta
    i_scale_avg = (i_scales * np.ones((nt, 1))).transpose()
    # As in Torrence and Compo (1998) equation 24.
    i_scale_avg = i_power / i_scale_avg
    i_scale_avg = dj / Fs * i_Cdelta * i_scale_avg[i_sel, :].sum(axis=0)
    i_max = max(i_scale_avg)
    return i_max
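# `find` above is the legacy pylab.find (indices where a condition holds),
# removed in newer matplotlib releases; an equivalent stand-in:
def find(condition):
    return np.nonzero(condition)[0]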
def wavelet_decompose_power_spectrum(signal, wl=None, resample=None,
                                     resample_freq=None,
                                     sampling_frequency=None,
                                     filter_frequency=40, dt=1):
    """
    :param signal: The signal, a numpy array or PyTorch Tensor of shape (N,)
    :param wl: Provided wavelet (see the pycwt documentation for available
        wavelets).
    :param resample: Downsample factor for the signal time series.
    :param resample_freq: Downsample factor for the wavelet frequency plane.
    :param sampling_frequency: Sampling frequency to be used by the
        Butterworth filter, if provided.
    :param filter_frequency: Cutoff frequency for the Butterworth filter.
    :param dt: Sampling interval for the continuous wavelet transform.
    :return: Resampled time series, resampled frequency series, power
        spectrum of shape (Frequencies, Time), original signal.
    """
    if resample is not None:
        signal = sp.resample(signal, signal.shape[0] // resample)
    if isinstance(signal, torch.Tensor):
        signal = signal.numpy()
    # Butterworth filter
    if sampling_frequency is not None:
        sos = sp.butter(5, filter_frequency, 'low', fs=sampling_frequency,
                        output='sos')
        signal = sp.sosfilt(sos, signal)
    time = np.arange(signal.shape[0])
    # p = np.polyfit(time, signal, 1)
    # dat_notrend = signal - np.polyval(p, time)
    # std = dat_notrend.std()  # Standard deviation
    # dat_norm = dat_notrend / std  # Normalized dataset
    if wl is None:
        wl = wavelet.Morlet(6)
    wave, scales, freqs, coi, fft, fftfreqs = wavelet.cwt(signal, dt,
                                                          wavelet=wl)
    power = (np.abs(wave)) ** 2
    power /= scales[:, None]
    if resample_freq is not None:
        power = sp.resample(power, num=resample_freq, axis=0)
        freqs = sp.resample(freqs, num=resample_freq)
    return time, np.array(freqs), power, signal
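# Usage sketch (illustrative values; assumes numpy as np, scipy.signal as sp,
# torch, and the pycwt `wavelet` module are imported as the function expects):
_sig = np.random.randn(4096)
_time, _freqs, _power, _sig_out = wavelet_decompose_power_spectrum(
    _sig, resample=2, resample_freq=64, sampling_frequency=250.0, dt=1 / 125)
# `_power` has shape (64, 2048): 64 resampled frequencies by 4096 // 2 samples.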
def continuous_wavelet_transform(data):
    """Written using the tutorial at
    https://pycwt.readthedocs.io/en/latest/tutorial.html"""
    dt = 0.25
    dj = 1 / 12
    dat = (data - data.mean()) / data.std()
    s0 = 2 * dt
    J = 7 / dj
    mother = wavelet.Morlet(6)
    wave, scales, freqs, coi, fft, fftfreqs = wavelet.cwt(
        dat, dt, dj, s0, J, mother)
    power = (np.abs(wave)) ** 2
    power = np.log2(power)
    return power
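# Usage sketch: dt = 0.25 is hard-coded above, so the input is treated as a
# quarterly-sampled series, exactly as in the pycwt tutorial it follows.
_data = np.random.randn(512)
_log_power = continuous_wavelet_transform(_data)
# `_log_power` has shape (J + 1, len(_data)) = (85, 512) for J = 7 / dj = 84.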
def wavelet(data, sampling_rate, f_start, f_stop, f_step=1, morlet=20):
    mother = pycwt.Morlet(morlet)  # Morlet size
    freqs = np.arange(f_start, f_stop + f_step, f_step)  # Frequency range
    wave, scales, freqs, coi, fft, fftfreqs = pycwt.cwt(
        data, 1. / sampling_rate, freqs=freqs, wavelet=mother)
    power = (np.abs(wave)) ** 2
    # Rectify the power spectrum according to suggestions proposed by
    # Liu et al. (2007).
    power /= scales[:, None]
    mask_coi(power, freqs, coi)
    return freqs, power
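# `mask_coi` is called above but not defined in this snippet; a plausible
# sketch (an assumption): blank out power at periods longer than the cone
# of influence, where edge effects dominate.
def mask_coi(power, freqs, coi):
    periods = 1.0 / freqs
    power[periods[:, None] > coi[None, :]] = np.nan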
def get_lf0_cwt(lf0):
    mother = wavelet.MexicanHat()
    dt = 0.005
    dj = 1
    s0 = dt * 2
    J = 9
    # C_delta = 3.541
    Wavelet_lf0, scales, freqs, coi, fft, fftfreqs = wavelet.cwt(
        np.squeeze(lf0), dt, dj, s0, J, mother)
    # Wavelet_le, scales, _, _, _, _ = wavelet.cwt(np.squeeze(le), dt, dj,
    #                                              s0, J, mother)
    Wavelet_lf0 = np.real(Wavelet_lf0).T
    # Wavelet_le = np.real(Wavelet_le).T  # (T, D=10)
    # lf0_le_cwt = np.concatenate((Wavelet_lf0, Wavelet_le), -1)
    # iwave = wavelet.icwt(np.squeeze(lf0), scales, dt, dj, mother) * std
    return Wavelet_lf0, scales
def continuous_wavelet_transform(data, octave_exponent=10, sub_octaves=25,
                                 starting_scale=2, dt=1):
    """Generate a continuous wavelet transform using pycwt"""
    std = data.std()  # Standard deviation
    dat = (data - data.mean()) / std  # Calculating anomaly and normalizing
    dj = 1 / sub_octaves  # x sub-octaves per octave
    s0 = starting_scale  # Starting scale
    J = octave_exponent / dj  # x powers of two with dj sub-octaves
    mother = wavelet.Morlet(6)  # Morlet mother wavelet with m=6
    # The following routines perform the wavelet transform and significance
    # analysis for the chosen data set.
    wave, scales, freqs, coi, fft, fftfreqs = wavelet.cwt(
        dat, dt, dj, s0, J, mother)
    # Normalized wavelet power spectrum
    wave = (np.abs(wave)) ** 2
    # Resize: average the time axis in blocks of `averaging_window` samples.
    averaging_window = 160
    height = wave.shape[0]
    rounded_shape = wave.shape[1] - wave.shape[1] % averaging_window
    wave = wave[:, :rounded_shape].reshape(-1, averaging_window).mean(
        axis=1).reshape(height, int(rounded_shape / averaging_window))
    # Normalize to (0, 1)
    # wave = (wave - np.min(wave)) / (np.max(wave) - np.min(wave))
    # Vertical flip
    # wave = np.flipud(wave)
    # Logarithm
    wave = np.log2(wave)
    return wave
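# Usage sketch for the block-averaging variant above (synthetic input; note
# the time axis is averaged in blocks of 160 samples, so inputs shorter than
# 160 samples produce an empty output axis):
_data = np.random.randn(8000)
_img = continuous_wavelet_transform(_data, octave_exponent=10, sub_octaves=25)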
def _padded_cwt(params, dt, dj, s0, J, mother, padding_len):
    """Private function to compute a wavelet transform on padded data

    Parameters
    ----------
    params: arraylike
        The prosodic parameters.
    dt: float
        The sampling interval.
    dj: float
        The spacing between discrete scales.
    s0: float
        The smallest scale of the wavelet.
    J: int
        The number of scales less one.
    mother: wavelet class
        The mother wavelet.
    padding_len: int
        The padding length

    Returns
    -------
    wavelet_matrix: ndarray
        The wavelet data resulting from the analysis
    scales: arraylike
        The scale indices corresponding to the wavelet data
    freqs: arraylike
        The Fourier frequencies corresponding to the wavelet scales.
    coi: array
        The cone of influence values
    fft: ndarray
        The normalized fast Fourier transform of the input.
    fftfreqs: arraylike
        The Fourier frequencies for the calculated FFT spectrum.
    """
    # padded = concatenate([params, params, params])
    padded = pad(params, padding_len, mode='edge')  # edge padding
    wavelet_matrix, scales, freqs, coi, fft, fftfreqs = cwt.cwt(
        padded, dt, dj, s0, J, mother)
    wavelet_matrix = _unpad(wavelet_matrix, padding_len)
    # wavelet_matrix = _unpad(wavelet_matrix, len(params))
    return (wavelet_matrix, scales, freqs, coi, fft, fftfreqs)
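# `_unpad` is referenced above but not defined here; a minimal sketch of the
# assumed inverse of the edge padding (`pad` is numpy.pad and `cwt` is the
# pycwt module under an alias, both assumptions from context):
def _unpad(matrix, num):
    return matrix[:, num:-num]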
def plot_spectrum(sig, wavename, casename, t1, t2, fs=1 / 60):
    """
    Plot the spectrogram of a given signal between two timeframes.

    :param sig: The processed signal to plot
    :param wavename: The name of the wavelet
    :param casename: The name of the analysed column
    :param t1: The starting timeframe
    :param t2: The ending timeframe
    :param fs: The sampling frequency, defaults to 1/60
    :return: Plots the spectrogram, saving the result to a file
    """
    T = np.array(range(t1, t2))
    dat = sig[T]
    dt = 1 / fs
    t = T / fs / 60 / 60
    dat_norm = dat / dat.std()  # Normalized dataset
    mother = wavelet.Morlet(6)
    s0 = 2 * dt
    dj = 1 / 12
    J = 7 / dj
    wave, scales, freqs, coi, fft, fftfreqs = wavelet.cwt(
        dat_norm, dt, dj, s0, J, mother)
    power = (np.abs(wave)) ** 2
    period = 1 / freqs / 60 / 60
    plt.figure(1, figsize=(6.4, 4.8))
    bx = plt.axes([0.1, 0.37, 0.65, 0.28])
    levels = [0.0625, 0.125, 0.25, 0.5, 1, 2, 4, 8, 16]
    bx.contourf(t, period, power, np.log2(levels), extend='both',
                cmap=plt.cm.prism)
    bx.set_ylabel('Period (hours)')
    bx.set_xlabel('Time (hours)')
    plt.savefig('./output/{}_2.png'.format(casename), dpi=300)
    plt.close('all')
def get_cwt(self, signal):
    scales = self.get_scales()
    # Get the frequency range of interest, roughly 0.67 to 4 Hz.
    MorletFourierFactor = 4 * math.pi / (6 + math.sqrt(2 + 6 ** 2))
    freqs = 1 / (scales * MorletFourierFactor)
    coef, scales, _, coi, fft, fftfreqs = wavelet.cwt(
        signal, 1 / self.samplingRate, wavelet='morlet', freqs=freqs)
    firstScaleIndex = np.where(freqs < self.maxFreq)[0][0]
    lastScaleIndex = np.where(freqs > self.minFreq)[0][-1]
    energyProfile = np.abs(coef)
    max_index = np.argmax(energyProfile[firstScaleIndex:lastScaleIndex, :],
                          axis=0)
    instantPulseRate = 60 * freqs[firstScaleIndex + max_index]
    return coef, instantPulseRate
def get_graph_from_file(in_filepath, out_folder, out_filename):
    # Get data
    # TODO: there are different formats of file
    # TODO: implement different parsers via function parameters
    p1 = numpy.genfromtxt(in_filepath)
    dat = p1  # TODO: clean up this aliasing
    title = 'NINO3 Sea Surface Temperature'
    label = 'NINO3 SST'
    units = 'degC'

    # Values for calculations
    # TODO: document the arguments
    t0 = 12.0  # Start time
    dt = 0.5   # Sampling interval, in minutes
    N = dat.size
    t = numpy.arange(0, N) * dt + t0
    p = numpy.polyfit(t - t0, dat, 1)
    dat_notrend = dat - numpy.polyval(p, t - t0)
    std = dat_notrend.std()  # Standard deviation
    var = std ** 2  # Variance
    dat_norm = dat_notrend / std  # Normalized dataset

    mother = wavelet.Morlet(6)
    s0 = 2 * dt  # Starting scale, twice the sampling interval
    dj = 1 / 12  # Twelve sub-octaves per octave
    J = 7 / dj  # Seven powers of two with dj sub-octaves
    alpha, _, _ = wavelet.ar1(dat)  # Lag-1 autocorrelation for red noise

    wave, scales, freqs, coi, fft, fftfreqs = wavelet.cwt(
        dat_norm, dt, dj, s0, J, mother)
    iwave = wavelet.icwt(wave, scales, dt, dj, mother) * std
    power = (numpy.abs(wave)) ** 2
    fft_power = numpy.abs(fft) ** 2
    period = 1 / freqs
    power /= scales[:, None]

    signif, fft_theor = wavelet.significance(1.0, dt, scales, 0, alpha,
                                             significance_level=0.95,
                                             wavelet=mother)
    sig95 = numpy.ones([1, N]) * signif[:, None]
    sig95 = power / sig95

    glbl_power = power.mean(axis=1)
    dof = N - scales  # Correction for padding at edges
    glbl_signif, tmp = wavelet.significance(var, dt, scales, 1, alpha,
                                            significance_level=0.95,
                                            dof=dof, wavelet=mother)

    sel = find((period >= 2) & (period < 8))
    Cdelta = mother.cdelta
    scale_avg = (scales * numpy.ones((N, 1))).transpose()
    scale_avg = power / scale_avg  # As in Torrence and Compo (1998) eq. 24
    scale_avg = var * dj * dt / Cdelta * scale_avg[sel, :].sum(axis=0)
    scale_avg_signif, tmp = wavelet.significance(
        var, dt, scales, 2, alpha, significance_level=0.95,
        dof=[scales[sel[0]], scales[sel[-1]]], wavelet=mother)

    # Prepare the figure
    pyplot.close('all')
    # pyplot.ioff()
    figprops = dict(dpi=144)
    fig = pyplot.figure(**figprops)

    # Second sub-plot: the normalized wavelet power spectrum with
    # significance level contour lines and the cone of influence hatched
    # area. Note that the period scale is logarithmic.
    bx = pyplot.axes([0.1, 0.37, 0.65, 0.28])
    levels = [0.0625, 0.125, 0.25, 0.5, 1, 2, 4, 8, 16]
    bx.contourf(t, period, numpy.log2(power), numpy.log2(levels),
                extend='both', cmap=pyplot.cm.viridis)
    extent = [t.min(), t.max(), 0, max(period)]
    bx.contour(t, period, sig95, [-99, 1], colors='k', linewidths=2,
               extent=extent)
    bx.set_title('{} Wavelet Power Spectrum ({})'.format(label, mother.name))
    bx.set_ylabel('Period (minutes)')
    # Yticks = 2 ** numpy.arange(numpy.ceil(numpy.log2(period.min())),
    #                            numpy.ceil(numpy.log2(period.max())))
    # bx.set_yticks(numpy.log2(Yticks))
    # bx.set_yticklabels(Yticks)
    bx.set_ylim([2, 20])

    # Save graph to file
    # TODO: implement
    # pyplot.savefig('{}/{}.png'.format(out_folder, out_filename))
    # ... or show the graph
    pyplot.show()
t = numpy.arange(0, N) * dt + t0
p = numpy.polyfit(t - t0, dat, 1)
dat_notrend = dat - numpy.polyval(p, t - t0)
std = dat_notrend.std()  # Standard deviation
var = std ** 2  # Variance
dat_norm = dat_notrend / std  # Normalized dataset

# Define wavelet parameters
mother = wavelet.Morlet(6)
s0 = 2 * dt  # Starting scale, in this case 2 * 0.25 years = 6 months
dj = 1 / 12  # Twelve sub-octaves per octave
J = 7 / dj  # Seven powers of two with dj sub-octaves
alpha, _, _ = wavelet.ar1(dat)  # Lag-1 autocorrelation for red noise

wave, scales, freqs, coi, fft, fftfreqs = wavelet.cwt(dat_norm, dt, dj, s0,
                                                      J, mother)
iwave = wavelet.icwt(wave, scales, dt, dj, mother) * std
power = (numpy.abs(wave)) ** 2
fft_power = numpy.abs(fft) ** 2
period = 1 / freqs
power /= scales[:, None]

signif, fft_theor = wavelet.significance(1.0, dt, scales, 0, alpha,
                                         significance_level=0.95,
                                         wavelet=mother)
def wavelet_analysis(z, tm, lon=None, lat=None, mother='Morlet', alpha=0.0,
                     siglvl=0.95, loc=None, onlyloc=False, periods=None,
                     sel_periods=[], show=False, save='', dsave='',
                     prefix='', labels=dict(), title=None, name=None,
                     fpath='', fpattern='', std=dict(), crange=None,
                     levels=None, cmap=cm.GMT_no_green, debug=False):
    """Continuous wavelet transform and significance analysis.

    The analysis is made using the methodology and statistical approach
    suggested by Torrence and Compo (1998).

    Depending on the dimensions of the input array, three different kinds of
    approaches are taken. If the input array is one-dimensional then only a
    simple analysis is performed. If the array is bi- or three-dimensional
    then spectral Hovmoller diagrams are drawn for each Fourier period given
    within a range of +/- 25%.

    PARAMETERS
        z (array like) :
            Input data. The data array should have one of these forms,
            z[tm], z[tm, lat] or z[tm, lat, lon].
        tm (array like) :
            Time axis. It should contain values in matplotlib date format
            (i.e. number of days since 0001-01-01 UTC).
        lon (array like, optional) :
            Longitude.
        lat (array like, optional) :
            Latitude.
        mother (string, optional) :
            Gives the name of the mother wavelet to be used. Possible values
            are 'Morlet' (default), 'Paul' or 'Mexican hat'.
        alpha (float or dictionary, optional) :
            Lag-1 autocorrelation for background noise. Default value is 0.0
            (white noise). If different autocorrelation coefficients should
            be used for different locations, then the input should contain a
            dictionary with 'lon', 'lat', 'map' keys as for the std
            parameter.
        siglvl (float, optional) :
            Significance level. Default value is 0.95.
        loc (array like, optional) :
            Special locations of interest. If the input array is of higher
            dimensions, the output of the simple wavelet analysis of each of
            the locations is output. The list should contain the pairs of
            (lon, lat) for each location of interest.
        onlyloc (boolean, optional) :
            If set to true then only the specified locations are analysed.
            The default is false.
        periods (array like, optional) :
            Special Fourier periods of interest in case of analysis of
            higher dimensions (in years).
        sel_periods (array like, optional) :
            Select for which Fourier periods the spectral power is averaged.
        show (boolean, optional) :
            If set to true then the resulting maps are shown on screen.
        save (string, optional) :
            The path in which the resulting plots are to be saved. If not
            set, then no images will be saved.
        dsave (string, optional) :
            If set, saves the scale averaged power spectrum series to this
            path. This is especially useful if memory is an issue.
        prefix (string, optional) :
            Prefix to retain naming conventions such as basin.
        labels (dictionary, optional) :
            Sets the labels for the plot axis.
        title (string, array like, optional) :
            Title of each of the selected periods.
        name (string, array like, optional) :
            Name of each of the selected periods. Used when saving the
            results to files.
        fpath (string, optional) :
            Path for the source files to be loaded when memory issues are a
            concern.
        fpattern (string, optional) :
            Regular expression pattern to match file names.
        std (dictionary, optional) :
            A dictionary containing a map of the standard deviation of the
            analysed time series. To set the longitude and latitude
            coordinates of the map, they should be included as separate
            'lon' and 'lat' key items. If they are omitted, then the regular
            input parameters are assumed. Accepted standard deviation error
            is set in key 'err' (default value is 1e-2).
        crange (array like, optional) :
            Array of power levels to be used in the average Hovmoller colour
            bar.
        levels (array like, optional) :
            Array of power levels to be used in the spectrogram colour bar.
        cmap (colormap, optional) :
            Sets the colour map to be used in the plots. The default is the
            Generic Mapping Tools (GMT) no green.
        debug (boolean, optional) :
            If set to True then warnings are shown.

    OUTPUT
        If show or save are set, plots either on screen and/or to file
        according to the specified parameters. If the dsave parameter is
        set, also saves the scale averaged power series to files.

    RETURNS
        wave (dictionary) :
            Dictionary containing the resulting calculations from the
            wavelet analysis according to the input parameters. The output
            items might be:
                scale -- Wavelet scales.
                period -- Equivalent Fourier periods (in days).
                power_spectrum -- Wavelet power spectrum (in units**2).
                power_significance -- Relative significance of the power
                    spectrum.
                global_power -- Global wavelet power spectrum (in units**2).
                scale_spectrum -- Scale averaged wavelet spectra (in
                    units**2) according to the selected periods.
                scale_significance -- Relative significance of the scale
                    averaged wavelet spectra.
                fft -- Fourier spectrum.
                fft_first -- Fourier spectrum of the first half of the
                    time series.
                fft_second -- Fourier spectrum of the second half of the
                    time series.
                fft_period -- Fourier periods (in days).
                trend -- Signal trend (in units/yr).
                wavelet_trend -- Wavelet spectrum trends (in units**2/yr).
    """
    t1 = time()
    result = {}

    # Resetting unit labels for Hovmoller plots
    hlabels = dict(labels)
    hlabels['units'] = ''

    # Setting some titles and paths
    if name is None:
        name = title

    # Working with the std parameter and setting its properties:
    if 'val' in std.keys():
        if 'lon' not in std.keys():
            std['lon'] = lon
        std['lon180'] = common.lon180(std['lon'])
        if 'lat' not in std.keys():
            std['lat'] = lat
        if 'err' not in std.keys():
            std['err'] = 1e-2
        std['map'] = True
    else:
        std['map'] = False

    # Lag-1 autocorrelation parameter
    if type(alpha).__name__ == 'dict':
        if 'lon' not in alpha.keys():
            alpha['lon'] = lon
        alpha['lon180'] = common.lon180(alpha['lon'])
        if 'lat' not in alpha.keys():
            alpha['lat'] = lat
        alpha['mean'] = alpha['val'].mean()
        alpha['map'] = True
        alpha['calc'] = False
    else:
        if alpha == -1:
            alpha = {'mean': -1, 'calc': True}
        else:
            alpha = {'val': alpha, 'mean': alpha, 'map': False,
                     'calc': False}

    # Shows some of the options on screen.
    print('Average Lag-1 autocorrelation for background noise: %.2f' %
          (alpha['mean']))
    if save:
        print('Saving result figures in \'%s\'.' % (save))
    if dsave:
        print('Saving result data in \'%s\'.' % (dsave))

    if fpath:
        # Gets the list of files to be loaded individually, extracts all the
        # latitudes and loads the first file to get the main parameters.
        flist = os.listdir(fpath)
        flist, match = common.reglist(flist, fpattern)
        if len(flist) == 0:
            raise Warning('No files matched search pattern.')
        flist = numpy.asarray(flist)
        lst_lat = []
        for item in match:
            y = float(item[-2])
            if item[-1].upper() == 'S':
                y *= -1
            lst_lat.append(y)
        # Detect file type from file name
        ftype = fm.detect_ftype(flist[0])
        x, y, tm, z = fm.load_map('%s/%s' % (fpath, flist[0]), ftype=ftype,
                                  masked=True)
        if lon is None:
            lon = x
        lat = numpy.unique(lst_lat)
        dim = 2
    else:
        # Transforms input arrays into numpy arrays and numpy masked arrays.
        tm = numpy.asarray(tm)
        z = numpy.ma.asarray(z)
        z.mask = numpy.isnan(z)

    # Determines the number of dimensions of the variable to be plotted and
    # the sizes of each dimension.
    a = b = c = None
    dim = len(z.shape)
    if dim == 3:
        c, b, a = z.shape
    elif dim == 2:
        c, a = z.shape
        b = 1
        z = z.reshape(c, b, a)
    else:
        c = z.shape[0]
        a = b = 1
        z = z.reshape(c, b, a)
    if tm.size != c:
        raise Warning('Time and data lengths do not match.')

    # Transforms coordinate arrays into numpy arrays
    s = type(lat).__name__
    if s in ['int', 'float', 'float64']:
        lat = numpy.asarray([lat])
    elif s != 'NoneType':
        lat = numpy.asarray(lat)
    s = type(lon).__name__
    if s in ['int', 'float', 'float64']:
        lon = numpy.asarray([lon])
    elif s != 'NoneType':
        lon = numpy.asarray(lon)

    # Starts the mother wavelet class instance and determines important
    # analysis parameters
    mother = mother.lower()
    if mother == 'morlet':
        mother = wavelet.Morlet()
    elif mother == 'paul':
        mother = wavelet.Paul()
    elif mother in ['mexican hat', 'mexicanhat', 'mexican_hat']:
        mother = wavelet.Mexican_hat()
    else:
        raise Warning('Mother wavelet unknown.')

    t = tm / common.daysinyear  # Time array in years
    dt = tm[1] - tm[0]  # Temporal sampling interval
    try:  # Zonal sampling interval
        dx = lon[1] - lon[0]
    except:
        dx = 1
    try:  # Meridional sampling interval
        dy = lat[1] - lat[0]
    except:
        dy = dx
    if numpy.isnan(dt):
        dt = 1
    if numpy.isnan(dx):
        dx = 1
    if numpy.isnan(dy):
        dy = dx
    dj = 0.25  # Four sub-octaves per octave
    s0 = 2 * dt  # Smallest scale
    J = 7 / dj - 1  # Seven powers of two with dj sub-octaves
    scales = period = None

    if type(crange).__name__ == 'NoneType':
        crange = numpy.arange(0, 1.1, 0.1)
    if type(levels).__name__ == 'NoneType':
        levels = 2. ** numpy.arange(-3, 6)
    if fpath:
        N = lat.size
        # TODO: refactoring
        # lon = numpy.arange(-81. - dx / 2., 290. + dx / 2, dx)
        # lat = numpy.unique(numpy.asarray(lst_lat))
        c, b, a = tm.size, lat.size, lon.size
    else:
        N = a * b

    # Making sure that the longitudes range from -180 to 180 degrees and
    # setting the squared search radius R2.
    try:
        lon180 = common.lon180(lon)
    except:
        lon180 = None
    R2 = dx ** 2 + dy ** 2
    if numpy.isnan(R2):
        R2 = 65535.
    if loc is not None:
        loc = numpy.asarray([[common.lon180(item[0]), item[1]]
                             for item in loc])

    # Initializes important result variables such as the global wavelet
    # power spectrum map, scale averaged spectrum time series and their
    # significance, and the wavelet power trend map.
    global_power = numpy.ma.empty([int(J) + 1, b, a]) * numpy.nan
    try:
        C = len(periods) + 1
        dT = numpy.diff(periods)
        pmin = numpy.concatenate([[periods[0] - dT[0] / 2],
                                  0.5 * (periods[:-1] + periods[1:])])
        pmax = numpy.concatenate([0.5 * (periods[:-1] + periods[1:]),
                                  [periods[-1] + dT[-1] / 2]])
    except:
        # Sets the lowest period to null and the highest to half the time
        # series length.
        C = 1
        pmin = numpy.array([0])
        pmax = numpy.array([(tm[-1] - tm[0]) / 2])
    if type(sel_periods).__name__ in ['int', 'float']:
        sel_periods = [sel_periods]
    elif len(sel_periods) == 0:
        sel_periods = [-1.]
    try:
        if fpath:
            raise Warning('Process files individually.')
        avg_spectrum = numpy.ma.empty([C, c, b, a]) * numpy.nan
        mem_error = False
    except:
        avg_spectrum = numpy.ma.empty([C, c, a]) * numpy.nan
        mem_error = True
    avg_spectrum_signif = numpy.ma.empty([C, b, a]) * numpy.nan
    trend = numpy.ma.empty([b, a]) * numpy.nan
    wavelet_trend = numpy.ma.empty([C, b, a]) * numpy.nan
    fft_trend = numpy.ma.empty([C, b, a]) * numpy.nan
    std_map = numpy.ma.empty([b, a]) * numpy.nan
    zero = numpy.ma.empty([c, a])
    fft_spectrum = None
    fft_spectrum1 = None
    fft_spectrum2 = None

    # Walks through each latitude and then through each longitude to perform
    # the temporal wavelet analysis.
    if N == 1:
        plural = ''
    else:
        plural = 's'
    s = 'Spectral analysis of %d location%s... ' % (N, plural)
    stdout.write(s)
    stdout.flush()
    for j in range(b):
        t2 = time()
        isloc = False  # Resets the 'is special location' flag
        hloc = []  # Clears the location list for Hovmoller plots
        zero *= numpy.nan
        if mem_error:
            # Clears the average spectrum for the next step.
            avg_spectrum *= numpy.nan
            avg_spectrum.mask = False
        if fpath:
            findex = pylab.find(lst_lat == lat[j])
            if len(findex) == 0:
                continue
            ftype = fm.detect_ftype(flist[findex[0]])
            try:
                x, y, tm, z = fm.load_dataset(fpath, flist=flist[findex],
                                              ftype=ftype, masked=True,
                                              lon=lon, lat=lat[j:j + 1],
                                              verbose=True)
            except:
                continue
            z = z[:, 0, :]
            x180 = common.lon180(x)

        # Determines the first and second halves of the time series and some
        # constants for the FFT
        fft_ta = numpy.ceil(t.min())
        fft_tb = numpy.floor(t.max())
        fft_tc = numpy.round(fft_ta + fft_tb) / 2
        fft_ia = pylab.find((t >= fft_ta) & (t <= fft_tc))
        fft_ib = pylab.find((t >= fft_tc) & (t <= fft_tb))
        fft_N = int(2 ** numpy.ceil(numpy.log2(max([len(fft_ia),
                                                    len(fft_ib)]))))
        fft_N2 = fft_N // 2 - 1
        fft_dt = t[fft_ib].mean() - t[fft_ia].mean()

        for i in range(a):
            # Some string output.
            try:
                Y, X = common.num2latlon(lon[i], lat[j], mode='each',
                                         padding=False)
            except:
                Y = X = '?'

            # Extracts an individual time series from the whole dataset and
            # sets or calculates its standard deviation, squared standard
            # deviation and finally the normalized time series.
            if fpath:
                try:
                    ilon = pylab.find(x == lon[i])[0]
                    fz = z[:, ilon]
                except:
                    continue
            else:
                fz = z[:, j, i]
            if fz.mask.all():
                continue
            if std['map']:
                try:
                    u = pylab.find(std['lon180'] == lon180[i])[0]
                    v = pylab.find(std['lat'] == lat[j])[0]
                except:
                    if debug:
                        warnings.warn('Unable to locate standard deviation '
                                      'for (%s, %s)' % (X, Y), Warning)
                    continue
                fstd = std['val'][v, u]
                estd = fstd - fz.std()
                if (estd < 0) & (abs(estd) > std['err']):
                    if debug:
                        warnings.warn('Discrepant input standard deviation '
                                      '(%f) location (%.3f, %.3f) will be '
                                      'disregarded.' %
                                      (estd, lon180[i], lat[j]))
                    continue
            else:
                fstd = fz.std()
            fstd2 = fstd ** 2
            std_map[j, i] = fstd
            zero[:, i] = fz
            fz = (fz - fz.mean()) / fstd

            # Calculates the distance of the current point to any special
            # location set in the 'loc' parameter. If only special locations
            # are to be analysed, then skips all other ones. If the input
            # array is one-dimensional, then do the analysis anyway.
            if dim == 1:
                dist = numpy.asarray([0.])
            else:
                try:
                    dist = numpy.asarray(
                        [((item[0] - (lon180[i])) ** 2 +
                          (item[1] - lat[j]) ** 2) for item in loc])
                except:
                    dist = []
            if (dist > R2).all() & (loc != 'all') & onlyloc:
                continue

            # Determines the lag-1 autocorrelation coefficient to be used in
            # the significance test from the input parameter
            if alpha['calc']:
                ac = acorr(fz)
                alpha_ij = (ac[c + 1] + ac[c + 2] ** 0.5) / 2
            elif alpha['map']:
                try:
                    u = pylab.find(alpha['lon180'] == lon180[i])[0]
                    v = pylab.find(alpha['lat'] == lat[j])[0]
                    alpha_ij = alpha['val'][v, u]
                except:
                    if debug:
                        warnings.warn('Unable to locate autocorrelation '
                                      'coefficient for (%s, %s), using mean '
                                      'value instead' % (X, Y), Warning)
                    alpha_ij = alpha['mean']
            else:
                alpha_ij = alpha['mean']

            # Calculates the continuous wavelet transform using the wavelet
            # Python module. Calculates the wavelet and Fourier power
            # spectra and the periods in days. Also calculates the Fourier
            # power spectrum for the first and second halves of the
            # time series.
            wave, scales, freqs, coi, fft, fftfreqs = wavelet.cwt(
                fz, dt, dj, s0, J, mother)
            power = abs(wave * wave.conj())
            fft_power = abs(fft * fft.conj())
            period = 1. / freqs
            fftperiod = 1. / fftfreqs
            psel = pylab.find(period <= pmax.max())

            # Calculates the Fourier transform for the first and the second
            # halves of the time series for later trend analysis.
            fft_1 = (numpy.fft.fft(fz[fft_ia], fft_N)[1:fft_N // 2] /
                     fft_N ** 0.5)
            fft_2 = (numpy.fft.fft(fz[fft_ib], fft_N)[1:fft_N // 2] /
                     fft_N ** 0.5)
            fft_p1 = abs(fft_1 * fft_1.conj())
            fft_p2 = abs(fft_2 * fft_2.conj())

            # Creates the FFT return arrays and stores the spectra
            # accordingly
            try:
                fft_spectrum[:, j, i] = fft_power * fstd2
                fft_spectrum1[:, j, i] = fft_p1 * fstd2
                fft_spectrum2[:, j, i] = fft_p2 * fstd2
            except:
                fft_spectrum = (numpy.ma.empty([len(fft_power), b, a]) *
                                numpy.nan)
                fft_spectrum1 = numpy.ma.empty([fft_N2, b, a]) * numpy.nan
                fft_spectrum2 = numpy.ma.empty([fft_N2, b, a]) * numpy.nan
                #
                fft_spectrum[:, j, i] = fft_power * fstd2
                fft_spectrum1[:, j, i] = fft_p1 * fstd2
                fft_spectrum2[:, j, i] = fft_p2 * fstd2

            # Performs the significance test according to the article by
            # Torrence and Compo (1998). The wavelet power is significant
            # if the ratio power/sig95 is > 1.
            signif, fft_theor = wavelet.significance(
                1., dt, scales, 0, alpha_ij, significance_level=siglvl,
                wavelet=mother)
            sig95 = (signif * numpy.ones((c, 1))).transpose()
            sig95 = power / sig95

            # Calculates the global wavelet power spectrum and its
            # significance. The global wavelet spectrum is the average of
            # the wavelet power spectrum over time. The degrees of freedom
            # (dof) have to be corrected for padding at the edges.
            glbl_power = power.mean(axis=1)
            dof = c - scales
            glbl_signif, tmp = wavelet.significance(
                1., dt, scales, 1, alpha_ij, significance_level=siglvl,
                dof=dof, wavelet=mother)
            global_power[:, j, i] = glbl_power * fstd2

            # Calculates the average wavelet spectrum along the scales and
            # its significance according to Torrence and Compo (1998)
            # eq. 24. The scale_avg_full variable is used multiple times
            # according to the selected periods range.
            #
            # Also calculates the average Fourier power spectrum.
            Cdelta = mother.cdelta
            scale_avg_full = (scales * numpy.ones((c, 1))).transpose()
            scale_avg_full = power / scale_avg_full
            for k in range(C):
                if k == 0:
                    sel = pylab.find((period >= pmin[0]) &
                                     (period <= pmax[-1]))
                    pminmax = [period[sel[0]], period[sel[-1]]]
                    les = pylab.find((fftperiod >= pmin[0]) &
                                     (fftperiod <= pmax[-1]))
                    fminmax = [fftperiod[les[0]], fftperiod[les[-1]]]
                else:
                    sel = pylab.find((period >= pmin[k - 1]) &
                                     (period < pmax[k - 1]))
                    pminmax = [pmin[k - 1], pmax[k - 1]]
                    les = pylab.find((fftperiod >= pmin[k - 1]) &
                                     (fftperiod <= pmax[k - 1]))
                    fminmax = [fftperiod[les[0]], fftperiod[les[-1]]]

                scale_avg = numpy.ma.array(
                    (dj * dt / Cdelta * scale_avg_full[sel, :].sum(axis=0)))
                scale_avg_signif, tmp = wavelet.significance(
                    1., dt, scales, 2, alpha_ij, significance_level=siglvl,
                    dof=[scales[sel[0]], scales[sel[-1]]], wavelet=mother)
                scale_avg.mask = (scale_avg < scale_avg_signif)
                if mem_error:
                    avg_spectrum[k, :, i] = scale_avg
                else:
                    avg_spectrum[k, :, j, i] = scale_avg
                avg_spectrum_signif[k, j, i] = scale_avg_signif

                # Trend analysis using a least squares polynomial fit of one
                # degree of the original input data and scale averaged
                # wavelet power. The wavelet power trend is calculated only
                # where the cone of influence spans the highest analysed
                # period. In the end, the returned value for the trend is in
                # units**2.
                #
                # Also calculates the trends in the Fourier power spectrum.
                # Note that the FFT power spectrum is already multiplied by
                # the signal's standard deviation.
                incoi = pylab.find(coi >= pmax[-1])
                if len(incoi) == 0:
                    incoi = numpy.arange(c)
                polyw = numpy.polyfit(t[incoi], scale_avg[incoi].data, 1)
                wavelet_trend[k, j, i] = polyw[0] * fstd2
                fft_trend[k, j, i] = (
                    fft_spectrum2[les[les < fft_N2], j, i] -
                    fft_spectrum1[les[les < fft_N2], j, i]).mean() / fft_dt
                if k == 0:
                    polyz = numpy.polyfit(t, fz * fstd, 1)
                    trend[j, i] = polyz[0]

                # Plots the wavelet analysis results for the individual
                # series. The plot is only generated if the dimension of the
                # input variable z is one, if a special location is within a
                # range of the search radius R and if the show or save
                # parameters are set.
                if (show | (save != '')) & ((k in sel_periods)):
                    if (dist < R2).any() | (loc == 'all') | (dim == 1):
                        # There is an interesting spot within the search
                        # radius of location (Y, X).
                        isloc = True
                        if (dist < R2).any():
                            try:
                                hloc.append(loc[(dist < R2)][0, 0])
                            except:
                                pass
                        if save:
                            try:
                                sv = '%s/tz_%s_%s_%d' % (
                                    save, prefix,
                                    common.num2latlon(lon[i], lat[j]), k)
                            except:
                                sv = '%s' % (save)
                        else:
                            sv = ''
                        graphics.wavelet_plot(
                            tm, period[psel], fz, power[psel, :], coi,
                            glbl_power[psel], scale_avg.data, fft=fft,
                            fft_period=fftperiod,
                            power_signif=sig95[psel, :],
                            glbl_signif=glbl_signif[psel],
                            scale_signif=scale_avg_signif, pminmax=pminmax,
                            labels=labels, normalized=True, std=fstd,
                            ztrend=polyz, wtrend=polyw, show=show, save=sv,
                            levels=levels, cmap=cmap)

        # Saves and/or plots the intermediate results as zonal temporal
        # diagrams.
        if dsave:
            for k in range(C):
                if k == 0:
                    sv = '%s/%s/%s_%s.xt.gz' % (
                        dsave, 'global', prefix,
                        common.num2latlon(lon[i], lat[j], mode='each')[0])
                else:
                    sv = '%s/%s/%s_%s.xt.gz' % (
                        dsave, name[k - 1].lower(), prefix,
                        common.num2latlon(lon[i], lat[j], mode='each')[0])
                if mem_error:
                    fm.save_map(lon, tm, avg_spectrum[k, :, :].data, sv,
                                lat[j])
                else:
                    fm.save_map(lon, tm, avg_spectrum[k, :, j, :].data, sv,
                                lat[j])

        if ((dim > 1) and (show or (save != '')) & (not onlyloc) and
                len(hloc) > 0):
            hloc = common.lon360(numpy.unique(hloc))
            if save:
                sv = '%s/xt_%s_%s' % (
                    save, prefix,
                    common.num2latlon(lon[i], lat[j], mode='each')[0])
            else:
                sv = ''
            if mem_error:
                # To include the overlapping original signal, use zz=zero
                gis.hovmoller(lon, tm, avg_spectrum[1:, :, :],
                              zo=avg_spectrum_signif[1:, j, :], title=title,
                              crange=crange, show=show, save=sv,
                              labels=hlabels, loc=hloc, cmap=cmap,
                              bottom='avg', right='avg', std=std_map[j, :])
            else:
                gis.hovmoller(lon, tm, avg_spectrum[1:, :, j, :],
                              zo=avg_spectrum_signif[1:, j, :], title=title,
                              crange=crange, show=show, save=sv,
                              labels=hlabels, loc=hloc, cmap=cmap,
                              bottom='avg', right='avg', std=std_map[j, :])

        # Flushing profiling text.
        stdout.write(len(s) * '\b')
        s = 'Spectral analysis of %d location%s (%s)... %s ' % (
            N, plural, Y, common.profiler(b, j + 1, 0, t1, t2))
        stdout.write(s)
        stdout.flush()
    stdout.write('\n')

    result['scale'] = scales
    result['period'] = period
    if dim == 1:
        result['power_spectrum'] = power * fstd2
        result['power_significance'] = sig95
        result['cwt'] = wave
        result['fft'] = fft
    result['global_power'] = global_power
    result['scale_spectrum'] = avg_spectrum
    if fpath:
        result['lon'] = lon
        result['lat'] = lat
    result['scale_significance'] = avg_spectrum_signif
    result['trend'] = trend
    result['wavelet_trend'] = wavelet_trend
    result['fft_power'] = fft_spectrum
    result['fft_first'] = fft_spectrum1
    result['fft_second'] = fft_spectrum2
    result['fft_period'] = fftperiod
    result['fft_trend'] = fft_trend
    return result
def wavelet(self, signal, mother='morlet', plot=True):
    """
    Takes a 1D signal and performs a continuous wavelet transform.

    Parameters
    ----------
    signal: ndarray
        The 1D data to transform.
    mother: string
        The name of the wavelet family. Acceptable values are
        Paul, Morlet, DOG, Mexican_hat.
    plot: bool
        If True, will show a plot of the result.

    Returns
    -------
    wave, scales, freqs, coi, power : as returned by pycwt.cwt, where
    `power` is the squared magnitude of the wavelet coefficients.
    """
    sig_level = 0.95
    std2 = signal.std() ** 2
    signal_orig = signal[:]
    signal = (signal - signal.mean()) / signal.std()
    t1 = np.linspace(0, self.period * signal.size, signal.size)
    wave, scales, freqs, coi, fft, fftfreqs = wavelet.cwt(
        signal, self.period, wavelet=mother, dj=1 / 100)
    power = (np.abs(wave)) ** 2
    period = 1 / freqs
    # alpha, _, _ = wavelet.ar1(signal)
    alpha = 0.0  # (variance=1 for the normalized SST)

    signif, fft_theor = wavelet.significance(
        1.0, self.period, scales, 0, alpha,
        significance_level=sig_level, wavelet=mother)
    sig95 = np.ones([1, signal.size]) * signif[:, None]
    sig95 = power / sig95

    glbl_power = std2 * power.mean(axis=1)
    dof = signal.size - scales
    glbl_signif, tmp = wavelet.significance(
        std2, self.period, scales, 1, alpha,
        significance_level=sig_level, dof=dof, wavelet=mother)

    # Index of the period closest to the maximum cone of influence.
    idx = self.find_closest(period, coi.max())

    # Convert to minutes.
    t1 /= 60
    period /= 60
    coi /= 60

    if plot:
        plt.figure(figsize=(12, 12))
        ax = plt.axes([0.1, 0.75, 0.65, 0.2])
        ax.plot(t1, signal_orig - signal_orig.mean(), 'k', linewidth=1.5)
        extent = [t1.min(), t1.max(), 0, max(period)]
        bx = plt.axes([0.1, 0.1, 0.65, 0.55], sharex=ax)
        im = NonUniformImage(bx, interpolation='nearest', extent=extent)
        im.set_cmap('cubehelix')
        im.set_data(t1, period[:idx], power[:idx, :])
        bx.images.append(im)
        bx.contour(t1, period[:idx], sig95[:idx, :], [-99, 1], colors='w',
                   linewidths=2, extent=extent)
        bx.fill(np.concatenate([t1, t1[-1:] + self.period,
                                t1[-1:] + self.period,
                                t1[:1] - self.period,
                                t1[:1] - self.period]),
                np.concatenate([coi, [1e-9], period[-1:], period[-1:],
                                [1e-9]]),
                'k', alpha=0.3, hatch='x', zorder=100)
        bx.set_xlim(t1.min(), t1.max())
        cx = plt.axes([0.77, 0.1, 0.2, 0.55], sharey=bx)
        cx.plot(glbl_signif[:idx], period[:idx], 'k--')
        cx.plot(glbl_power[:idx], period[:idx], 'k-', linewidth=1.5)
        cx.set_ylim(([min(period), period[idx]]))
        plt.setp(cx.get_yticklabels(), visible=False)
        plt.show()

    return wave, scales, freqs, coi, power
import pycwt
import numpy as np
from pylab import *

# Note: this snippet targets an older, object-based pycwt API, where cwt()
# returns a transform object with power() and scales attributes; it differs
# from the function-based pycwt API used in the other snippets.
filename = 'sst_nino3.dat'
data = loadtxt(filename)

# Remove the mean
data = (data - np.nansum(data) / len(data))
data[np.isnan(data)] = 0

t = pycwt.cwt(data, pycwt.Morlet(), octaves=8, dscale=0.1)
b = pycwt.bootstrap_signif(t, 200)

imshow(t.power(), aspect='auto')
contour(b, levels=[0.05], colors='w')

figure()
plot(pycwt.time_avg(t), t.scales)
# The cwt call returns a list containing the [wave, scales, freqs, coi, fft,
# fftfreqs] variables.
mother = wavelet.Morlet(6)  # Morlet mother wavelet with m=6
slevel = 0.95  # Significance level
dj = 1 / 12  # Twelve sub-octaves per octave
s0 = -1  # 2 * dt  # Starting scale, here 6 months
J = -1  # 7 / dj  # Seven powers of two with dj sub-octaves
if True:
    alpha1, _, _ = wavelet.ar1(s1)  # Lag-1 autocorrelation for red noise
    alpha2, _, _ = wavelet.ar1(s2)  # Lag-1 autocorrelation for red noise
else:
    alpha1 = alpha2 = 0.0  # Lag-1 autocorrelation for white noise

# The following routines perform the wavelet transform and significance
# analysis for the two data sets.
W1, scales1, freqs1, coi1, _, _ = wavelet.cwt(s1 / std1, dt, dj, s0, J,
                                              mother)
signif1, fft_theor1 = wavelet.significance(1.0, dt, scales1, 0, alpha1,
                                           significance_level=slevel,
                                           wavelet=mother)
W2, scales2, freqs2, coi2, _, _ = wavelet.cwt(s2 / std2, dt, dj, s0, J,
                                              mother)
signif2, fft_theor2 = wavelet.significance(1.0, dt, scales2, 0, alpha2,
                                           significance_level=slevel,
                                           wavelet=mother)

power1 = (np.abs(W1)) ** 2  # Normalized wavelet power spectrum
power2 = (np.abs(W2)) ** 2  # Normalized wavelet power spectrum
period1 = 1 / freqs1
period2 = 1 / freqs2
sig95_1 = np.ones([1, n1]) * signif1[:, None]
sig95_1 = power1 / sig95_1  # Where ratio > 1, power is significant
sig95_2 = np.ones([1, n2]) * signif2[:, None]
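# The snippet ends mid-computation; by symmetry with sig95_1, the next step
# is presumably:
sig95_2 = power2 / sig95_2  # Where ratio > 1, power is significant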
def wavelet_analysis(z, tm, lon=None, lat=None, mother='Morlet', alpha=0.0, siglvl=0.95, loc=None, onlyloc=False, periods=None, sel_periods=[], show=False, save='', dsave='', prefix='', labels=dict(), title=None, name=None, fpath='', fpattern='', std=dict(), crange=None, levels=None, cmap=cm.GMT_no_green, debug=False): """Continuous wavelet transform and significance analysis. The analysis is made using the methodology and statistical approach suggested by Torrence and Compo (1998). Depending on the dimensions of the input array, three different kinds of approaches are taken. If the input array is one-dimensional then only a simple analysis is performed. If the array is bi- or three-dimensional then spectral Hovmoller diagrams are drawn for each Fourier period given within a range of +/-25%. PARAMETERS z (array like) : Input data. The data array should have one of these forms, z[tm], z[tm, lat] or z[tm, lat, lon]. tm (array like) : Time axis. It should contain values in matplotlib date format (i.e. number of days since 0001-01-01 UTC). lon (array like, optional) : Longitude. lat (array like, optional) : Latitude. mother (string, optional) : Gives the name of the mother wavelet to be used. Possible values are 'Morlet' (default), 'Paul' or 'Mexican hat'. alpha (float or dictionary, optional) : Lag-1 autocorrelation for background noise. Default value is 0.0 (white noise). If different autocorrelation coefficients should be used for different locations, then the input should contain a dictionary with 'lon', 'lat', 'map' keys as for the std parameter. siglvl (float, optional) : Significance level. Default value is 0.95. loc (array like, optional) : Special locations of interest. If the input array is of higher dimenstions, the output of the simple wavelet analysis of each of the locations is output. The list should contain the pairs of (lon, lat) for each locations of interest. onlyloc (boolean, optional) : If set to true then only the specified locations are analysed. The default is false. periods (array like, optional) : Special Fourier periods of interest in case of analysis of higher dimensions (in years). sel_periods (array like, optional) : Select which Fourier periods spectral power are averaged. show (boolean, optional) : If set to true the the resulting maps are shown on screen. save (string, optional) : The path in which the resulting plots are to be saved. If not set, then no images will be saved. dsave (string, optional) : If set, saves the scale averaged power spectrum series to this path. This is especially useful if memory is an issue. prefix (string, optional) : Prefix to retain naming conventions such as basin. labels (dictionary, optional) : Sets the labels for the plot axis. title (string, array like, optional) : Title of each of the selected periods. name (string, array like, optional) : Name of each of the selected periods. Used when saving the results to files. fpath (string, optional) : Path for the source files to be loaded when memory issues are a concern. fpattern (string, optional) : Regular expression pattern to match file names. std (dictionary, optional) : A dictionary containing a map of the standard deviation of the analysed time series. To set the longitude and latitude coordinates of the map, they should be included as separate 'lon' and 'lat' key items. If they are omitted, then the regular input parameters are assumed. Accepted standard deviation error is set in key 'err' (default value is 1e-2). 
crange (array like, optional) : Array of power levels to be used in average Hovmoler colour bar. levels (array like, optional) : Array of power levels to be used in spectrogram colour bar. cmap (colormap, optional) : Sets the colour map to be used in the plots. The default is the Generic Mapping Tools (GMT) no green. debug (boolean, optional) : If set to True then warnings are shown. OUTPUT If show or save are set, plots either on screen and or on file according to the specified parameters. If dsave parameter is set, also saves the scale averaged power series to files. RETURNS wave (dictionary) : Dictionary containing the resulting calculations from the wavelet analysis according to the input parameters. The output items might be: scale -- Wavelet scales. period -- Equivalent Fourier periods (in days). power_spectrum -- Wavelet power spectrum (in units**2). power_significance -- Relative significance of the power spectrum. global_power -- Global wavelet power spectrum (in units**2). scale_spectrum -- Scale averaged wavelet spectra (in units**2) according to selected periods. scale_significance -- Relative significance of the scale averaged wavelet spectra. fft -- Fourier spectrum. fft_first -- Fourier spectrum of the first half of the time-series. fft_second -- Fourier spectrum of the second half of the time-series. fft_period -- Fourier periods (in days). trend -- Signal trend (in units/yr). wavelet_trend -- Wavelet spectrum trends (in units**2/yr). """ t1 = time() result = {} # Resseting unit labels for hovmoller plots hlabels = dict(labels) hlabels['units'] = '' # Setting some titles and paths if name == None: name = title # Working with the std parameter and setting its properties: if 'val' in std.keys(): if 'lon' not in std.keys(): std['lon'] = lon std['lon180'] = common.lon180(std['lon']) if 'lat' not in std.keys(): std['lat'] = lat if 'err' not in std.keys(): std['err'] = 1e-2 std['map'] = True else: std['map'] = False # Lag-1 autocorrelation parameter if type(alpha).__name__ == 'dict': if 'lon' not in alpha.keys(): alpha['lon'] = lon alpha['lon180'] = common.lon180(alpha['lon']) if 'lat' not in alpha.keys(): alpha['lat'] = lat alpha['mean'] = alpha['val'].mean() alpha['map'] = True alpha['calc'] = False else: if alpha == -1: alpha = {'mean': -1, 'calc': True} else: alpha = {'val': alpha, 'mean': alpha, 'map': False, 'calc': False} # Shows some of the options on screen. print ('Average Lag-1 autocorrelation for background noise: %.2f' % (alpha['mean'])) if save: print 'Saving result figures in \'%s\'.' % (save) if dsave: print 'Saving result data in \'%s\'.' % (dsave) if fpath: # Gets the list of files to be loaded individually extracts all the # latitudes and loads the first file to get the main parameters. flist = os.listdir(fpath) flist, match = common.reglist(flist, fpattern) if len(flist) == 0: raise Warning, 'No files matched search pattern.' flist = numpy.asarray(flist) lst_lat = [] for item in match: y = string.atof(item[-2]) if item[-1].upper() == 'S': y *= -1 lst_lat.append(y) # Detect file type from file name ftype = fm.detect_ftype(flist[0]) x, y, tm, z = fm.load_map('%s/%s' % (fpath, flist[0]), ftype=ftype, masked=True) if lon == None: lon = x lat = numpy.unique(lst_lat) dim = 2 else: # Transforms input arrays in numpy arrays and numpy masked arrays. tm = numpy.asarray(tm) z = numpy.ma.asarray(z) z.mask = numpy.isnan(z) # Determines the number of dimensions of the variable to be plotted and # the sizes of each dimension. 
a = b = c = None dim = len(z.shape) if dim == 3: c, b, a = z.shape elif dim == 2: c, a = z.shape b = 1 z = z.reshape(c, b, a) else: c = z.shape[0] a = b = 1 z = z.reshape(c, b, a) if tm.size != c: raise Warning, 'Time and data lengths do not match.' # Transforms coordinate arrays into numpy arrays s = type(lat).__name__ if s in ['int', 'float', 'float64']: lat = numpy.asarray([lat]) elif s != 'NoneType': lat = numpy.asarray(lat) s = type(lon).__name__ if s in ['int', 'float', 'float64']: lon = numpy.asarray([lon]) elif s != 'NoneType': lon = numpy.asarray(lon) # Starts the mother wavelet class instance and determines important # analysis parameters mother = mother.lower() if mother == 'morlet': mother = wavelet.Morlet() elif mother == 'paul': mother = wavelet.Paul() elif mother in ['mexican hat', 'mexicanhat', 'mexican_hat']: mother = wavelet.Mexican_hat() else: raise Warning, 'Mother wavelet unknown.' t = tm / common.daysinyear # Time array in years dt = tm[1] - tm[0] # Temporal sampling interval try: # Zonal sampling interval dx = lon[1] - lon[0] except: dx = 1 try: # Meridional sampling interval dy = lat[1] - lat[0] except: dy = dx if numpy.isnan(dt): dt = 1 if numpy.isnan(dx): dx = 1 if numpy.isnan(dy): dy = dx dj = 0.25 # Four sub-octaves per octave s0 = 2 * dt # Smallest scale J = 7 / dj - 1 # Seven powers of two with dj sub-octaves scales = period = None if type(crange).__name__ == 'NoneType': crange = numpy.arange(0, 1.1, 0.1) if type(levels).__name__ == 'NoneType': levels = 2. ** numpy.arange(-3, 6) if fpath: N = lat.size # TODO: refactoring # lon = numpy.arange(-81. - dx / 2., 290. + dx / 2, dx) # TODO: refactoring # lat = numpy.unique(numpy.asarray(lst_lat)) c, b, a = tm.size, lat.size, lon.size else: N = a * b # Making sure that the longitudes range from -180 to 180 degrees and # setting the squared search radius R2. try: lon180 = common.lon180(lon) except: lon180 = None R2 = dx ** 2 + dy ** 2 if numpy.isnan(R2): R2 = 65535. if loc != None: loc = numpy.asarray([[common.lon180(item[0]), item[1]] for item in loc]) # Initializes important result variables such as the global wavelet power # spectrum map, scale avaraged spectrum time-series and their significance, # wavelet power trend map. global_power = numpy.ma.empty([J + 1, b, a]) * numpy.nan try: C = len(periods) + 1 dT = numpy.diff(periods) pmin = numpy.concatenate([[periods[0] - dT[0] / 2], 0.5 * (periods[:-1] + periods[1:])]) pmax = numpy.concatenate([0.5 * (periods[:-1] + periods[1:]), [periods[-1] + dT[-1] / 2]]) except: # Sets the lowest period to null and the highest to half the time # series length. C = 1 pmin = numpy.array([0]) pmax = numpy.array([(tm[-1] - tm[0]) / 2]) if type(sel_periods).__name__ in ['int', 'float']: sel_periods = [sel_periods] elif len(sel_periods) == 0: sel_periods = [-1.] try: if fpath: raise Warning, 'Process files individually' avg_spectrum = numpy.ma.empty([C, c, b, a]) * numpy.nan mem_error = False except: avg_spectrum = numpy.ma.empty([C, c, a]) * numpy.nan mem_error = True avg_spectrum_signif = numpy.ma.empty([C, b, a]) * numpy.nan trend = numpy.ma.empty([b, a]) * numpy.nan wavelet_trend = numpy.ma.empty([C, b, a]) * numpy.nan fft_trend = numpy.ma.empty([C, b, a]) * numpy.nan std_map = numpy.ma.empty([b, a]) * numpy.nan zero = numpy.ma.empty([c, a]) fft_spectrum = None fft_spectrum1 = None fft_spectrum2 = None # Walks through each latitude and then through each longitude to perform # the temporal wavelet analysis. 
    if N == 1:
        plural = ''
    else:
        plural = 's'
    s = 'Spectral analysis of %d location%s... ' % (N, plural)
    stdout.write(s)
    stdout.flush()
    for j in range(b):
        t2 = time()
        isloc = False  # Resets the 'is special location' flag.
        hloc = []      # Clears the location list for Hovmoller plots.
        zero *= numpy.nan
        if mem_error:
            # Clears the average spectrum for the next step.
            avg_spectrum *= numpy.nan
            avg_spectrum.mask = False
        if fpath:
            # numpy.flatnonzero replaces the deprecated pylab.find.
            findex = numpy.flatnonzero(numpy.asarray(lst_lat) == lat[j])
            if len(findex) == 0:
                continue
            ftype = fm.detect_ftype(flist[findex[0]])
            try:
                x, y, tm, z = fm.load_dataset(fpath, flist=flist[findex],
                                              ftype=ftype, masked=True,
                                              lon=lon, lat=lat[j:j + 1],
                                              verbose=True)
            except:
                continue
            z = z[:, 0, :]
            x180 = common.lon180(x)

        # Determines the first and second halves of the time-series and
        # some constants for the FFT.
        fft_ta = numpy.ceil(t.min())
        fft_tb = numpy.floor(t.max())
        fft_tc = numpy.round(fft_ta + fft_tb) / 2
        fft_ia = numpy.flatnonzero((t >= fft_ta) & (t <= fft_tc))
        fft_ib = numpy.flatnonzero((t >= fft_tc) & (t <= fft_tb))
        fft_N = int(2 ** numpy.ceil(numpy.log2(max([len(fft_ia),
                                                    len(fft_ib)]))))
        fft_N2 = fft_N // 2 - 1
        fft_dt = t[fft_ib].mean() - t[fft_ia].mean()

        for i in range(a):
            # Some string output.
            try:
                Y, X = common.num2latlon(lon[i], lat[j], mode='each',
                                         padding=False)
            except:
                Y = X = '?'

            # Extracts an individual time-series from the whole dataset
            # and sets or calculates its standard deviation, squared
            # standard deviation and, finally, the normalized
            # time-series.
            if fpath:
                try:
                    ilon = numpy.flatnonzero(x == lon[i])[0]
                    fz = z[:, ilon]
                except:
                    continue
            else:
                fz = z[:, j, i]
            if fz.mask.all():
                continue
            if std['map']:
                try:
                    u = numpy.flatnonzero(std['lon180'] == lon180[i])[0]
                    v = numpy.flatnonzero(std['lat'] == lat[j])[0]
                except:
                    if debug:
                        warnings.warn('Unable to locate standard '
                                      'deviation for (%s, %s).' % (X, Y),
                                      Warning)
                    continue
                fstd = std['val'][v, u]
                estd = fstd - fz.std()
                if (estd < 0) & (abs(estd) > std['err']):
                    if debug:
                        warnings.warn('Discrepant input standard '
                                      'deviation (%f) at location '
                                      '(%.3f, %.3f) will be disregarded.'
                                      % (estd, lon180[i], lat[j]))
                    continue
            else:
                fstd = fz.std()
            fstd2 = fstd ** 2
            std_map[j, i] = fstd
            zero[:, i] = fz
            fz = (fz - fz.mean()) / fstd

            # Calculates the distance of the current point to any special
            # location set in the 'loc' parameter. If only special
            # locations are to be analysed, then skips all other ones. If
            # the input array is one dimensional, then do the analysis
            # anyway.
            if dim == 1:
                dist = numpy.asarray([0.])
            else:
                try:
                    dist = numpy.asarray([((item[0] - lon180[i]) ** 2 +
                                           (item[1] - lat[j]) ** 2)
                                          for item in loc])
                except:
                    dist = numpy.array([])
            if (dist > R2).all() and (loc != 'all') and onlyloc:
                continue

            # Determines the lag-1 autocorrelation coefficient to be used
            # in the significance test from the input parameter.
            if alpha['calc']:
                ac = acorr(fz)
                alpha_ij = (ac[c + 1] + ac[c + 2] ** 0.5) / 2
            elif alpha['map']:
                try:
                    u = numpy.flatnonzero(alpha['lon180'] == lon180[i])[0]
                    v = numpy.flatnonzero(alpha['lat'] == lat[j])[0]
                    alpha_ij = alpha['val'][v, u]
                except:
                    if debug:
                        warnings.warn('Unable to locate lag-1 '
                                      'autocorrelation for (%s, %s), '
                                      'using mean value instead.'
                                      % (X, Y), Warning)
                    alpha_ij = alpha['mean']
            else:
                alpha_ij = alpha['mean']

            # Calculates the continuous wavelet transform using the
            # wavelet Python module. Calculates the wavelet and Fourier
            # power spectra and the periods in days. Also calculates the
            # Fourier power spectrum for the first and second halves of
            # the time-series.
            wave, scales, freqs, coi, fft, fftfreqs = wavelet.cwt(
                fz, dt, dj, s0, J, mother)
            power = abs(wave * wave.conj())
            fft_power = abs(fft * fft.conj())
            period = 1. / freqs
            fftperiod = 1. / fftfreqs
            psel = numpy.flatnonzero(period <= pmax.max())

            # Calculates the Fourier transform for the first and the
            # second halves of the time-series for later trend analysis.
            fft_1 = (numpy.fft.fft(fz[fft_ia], fft_N)[1:fft_N // 2] /
                     fft_N ** 0.5)
            fft_2 = (numpy.fft.fft(fz[fft_ib], fft_N)[1:fft_N // 2] /
                     fft_N ** 0.5)
            fft_p1 = abs(fft_1 * fft_1.conj())
            fft_p2 = abs(fft_2 * fft_2.conj())

            # Creates the FFT return arrays and stores the spectra
            # accordingly.
            try:
                fft_spectrum[:, j, i] = fft_power * fstd2
                fft_spectrum1[:, j, i] = fft_p1 * fstd2
                fft_spectrum2[:, j, i] = fft_p2 * fstd2
            except:
                fft_spectrum = (numpy.ma.empty([len(fft_power), b, a]) *
                                numpy.nan)
                fft_spectrum1 = numpy.ma.empty([fft_N2, b, a]) * numpy.nan
                fft_spectrum2 = numpy.ma.empty([fft_N2, b, a]) * numpy.nan
                # Stores the current spectra now that the arrays exist.
                fft_spectrum[:, j, i] = fft_power * fstd2
                fft_spectrum1[:, j, i] = fft_p1 * fstd2
                fft_spectrum2[:, j, i] = fft_p2 * fstd2

            # Performs the significance test according to the article by
            # Torrence and Compo (1998). The wavelet power is significant
            # where the ratio power/sig95 > 1.
            signif, fft_theor = wavelet.significance(
                1., dt, scales, 0, alpha_ij, significance_level=siglvl,
                wavelet=mother)
            sig95 = (signif * numpy.ones((c, 1))).transpose()
            sig95 = power / sig95

            # Calculates the global wavelet power spectrum and its
            # significance. The global wavelet spectrum is the average of
            # the wavelet power spectrum over time. The degrees of
            # freedom (dof) have to be corrected for padding at the
            # edges.
            glbl_power = power.mean(axis=1)
            dof = c - scales
            glbl_signif, tmp = wavelet.significance(
                1., dt, scales, 1, alpha_ij, significance_level=siglvl,
                dof=dof, wavelet=mother)
            global_power[:, j, i] = glbl_power * fstd2

            # Calculates the scale-averaged wavelet spectrum along the
            # scales and its significance according to Torrence and
            # Compo (1998), eq. 24. The scale_avg_full variable is reused
            # for each of the selected period ranges.
            #
            # Also calculates the average Fourier power spectrum.
            Cdelta = mother.cdelta
            scale_avg_full = (scales * numpy.ones((c, 1))).transpose()
            scale_avg_full = power / scale_avg_full
            for k in range(C):
                if k == 0:
                    sel = numpy.flatnonzero((period >= pmin[0]) &
                                            (period <= pmax[-1]))
                    pminmax = [period[sel[0]], period[sel[-1]]]
                    les = numpy.flatnonzero((fftperiod >= pmin[0]) &
                                            (fftperiod <= pmax[-1]))
                    fminmax = [fftperiod[les[0]], fftperiod[les[-1]]]
                else:
                    sel = numpy.flatnonzero((period >= pmin[k - 1]) &
                                            (period < pmax[k - 1]))
                    pminmax = [pmin[k - 1], pmax[k - 1]]
                    les = numpy.flatnonzero((fftperiod >= pmin[k - 1]) &
                                            (fftperiod <= pmax[k - 1]))
                    fminmax = [fftperiod[les[0]], fftperiod[les[-1]]]

                scale_avg = numpy.ma.array(
                    dj * dt / Cdelta * scale_avg_full[sel, :].sum(axis=0))
                scale_avg_signif, tmp = wavelet.significance(
                    1., dt, scales, 2, alpha_ij,
                    significance_level=siglvl,
                    dof=[scales[sel[0]], scales[sel[-1]]], wavelet=mother)
                scale_avg.mask = (scale_avg < scale_avg_signif)
                if mem_error:
                    avg_spectrum[k, :, i] = scale_avg
                else:
                    avg_spectrum[k, :, j, i] = scale_avg
                avg_spectrum_signif[k, j, i] = scale_avg_signif

                # Trend analysis using a least-squares polynomial fit of
                # degree one on the original input data and on the
                # scale-averaged wavelet power. The wavelet power trend
                # is calculated only where the cone of influence spans
                # the highest analysed period. In the end, the returned
                # value for the trend is in units**2.
                #
                # Also calculates the trends in the Fourier power
                # spectrum. Note that the FFT power spectrum is already
                # multiplied by the signal's standard deviation.
                incoi = numpy.flatnonzero(coi >= pmax[-1])
                if len(incoi) == 0:
                    incoi = numpy.arange(c)
                polyw = numpy.polyfit(t[incoi], scale_avg[incoi].data, 1)
                wavelet_trend[k, j, i] = polyw[0] * fstd2
                fft_trend[k, j, i] = (
                    fft_spectrum2[les[les < fft_N2], j, i] -
                    fft_spectrum1[les[les < fft_N2], j, i]).mean() / fft_dt
                if k == 0:
                    polyz = numpy.polyfit(t, fz * fstd, 1)
                    trend[j, i] = polyz[0]

                # Plots the wavelet analysis results for the individual
                # series. The plot is only generated if the dimension of
                # the input variable z is one, if a special location is
                # within the search radius R and if the show or save
                # parameters are set.
                if (show or save != '') and (k in sel_periods):
                    if (dist < R2).any() or (loc == 'all') or (dim == 1):
                        # There is an interesting spot within the search
                        # radius of the current location.
                        isloc = True
                        if (dist < R2).any():
                            try:
                                hloc.append(loc[dist < R2][0, 0])
                            except:
                                pass
                        if save:
                            try:
                                sv = '%s/tz_%s_%s_%d' % (
                                    save, prefix,
                                    common.num2latlon(lon[i], lat[j]), k)
                            except:
                                sv = '%s' % (save)
                        else:
                            sv = ''
                        graphics.wavelet_plot(
                            tm, period[psel], fz, power[psel, :], coi,
                            glbl_power[psel], scale_avg.data, fft=fft,
                            fft_period=fftperiod,
                            power_signif=sig95[psel, :],
                            glbl_signif=glbl_signif[psel],
                            scale_signif=scale_avg_signif,
                            pminmax=pminmax, labels=labels,
                            normalized=True, std=fstd, ztrend=polyz,
                            wtrend=polyw, show=show, save=sv,
                            levels=levels, cmap=cmap)

        # Saves and/or plots the intermediate results as zonal-temporal
        # diagrams.
        if dsave:
            for k in range(C):
                if k == 0:
                    sv = '%s/%s/%s_%s.xt.gz' % (
                        dsave, 'global', prefix,
                        common.num2latlon(lon[i], lat[j], mode='each')[0])
                else:
                    sv = '%s/%s/%s_%s.xt.gz' % (
                        dsave, name[k - 1].lower(), prefix,
                        common.num2latlon(lon[i], lat[j], mode='each')[0])
                if mem_error:
                    fm.save_map(lon, tm, avg_spectrum[k, :, :].data, sv,
                                lat[j])
                else:
                    fm.save_map(lon, tm, avg_spectrum[k, :, j, :].data,
                                sv, lat[j])

        if ((dim > 1) and (show or save != '') and not onlyloc and
                len(hloc) > 0):
            hloc = common.lon360(numpy.unique(hloc))
            if save:
                sv = '%s/xt_%s_%s' % (
                    save, prefix,
                    common.num2latlon(lon[i], lat[j], mode='each')[0])
            else:
                sv = ''
            if mem_error:
                # To include the overlapping original signal, use
                # zz=zero.
                gis.hovmoller(lon, tm, avg_spectrum[1:, :, :],
                              zo=avg_spectrum_signif[1:, j, :],
                              title=title, crange=crange, show=show,
                              save=sv, labels=hlabels, loc=hloc,
                              cmap=cmap, bottom='avg', right='avg',
                              std=std_map[j, :])
            else:
                gis.hovmoller(lon, tm, avg_spectrum[1:, :, j, :],
                              zo=avg_spectrum_signif[1:, j, :],
                              title=title, crange=crange, show=show,
                              save=sv, labels=hlabels, loc=hloc,
                              cmap=cmap, bottom='avg', right='avg',
                              std=std_map[j, :])

        # Flushes the profiling text.
        stdout.write(len(s) * '\b')
        s = 'Spectral analysis of %d location%s (%s)... %s ' % (
            N, plural, Y, common.profiler(b, j + 1, 0, t1, t2))
        stdout.write(s)
        stdout.flush()

    stdout.write('\n')

    result['scale'] = scales
    result['period'] = period
    if dim == 1:
        result['power_spectrum'] = power * fstd2
        result['power_significance'] = sig95
        result['cwt'] = wave
        result['fft'] = fft
    result['global_power'] = global_power
    result['scale_spectrum'] = avg_spectrum
    if fpath:
        result['lon'] = lon
        result['lat'] = lat
    result['scale_significance'] = avg_spectrum_signif
    result['trend'] = trend
    result['wavelet_trend'] = wavelet_trend
    result['fft_power'] = fft_spectrum
    result['fft_first'] = fft_spectrum1
    result['fft_second'] = fft_spectrum2
    result['fft_period'] = fftperiod
    result['fft_trend'] = fft_trend
    return result
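# ----------------------------------------------------------------------------
# A minimal, hypothetical usage sketch for the analysis routine that ends
# above. Its name and full signature are defined earlier in the source, so
# the function name `wavelet_analysis` and the keyword arguments below are
# illustrative assumptions only; the returned dictionary keys follow the
# RETURNS section of the docstring.
import numpy as np

t_days = np.arange(0, 3650, 7.0)  # ten years of weekly samples (in days)
series = (np.sin(2 * np.pi * t_days / 365.25) +
          0.3 * np.random.randn(t_days.size))
result = wavelet_analysis(series, tm=t_days, mother='morlet', alpha=-1,
                          siglvl=0.95, show=False, save='')
print(result['period'])        # equivalent Fourier periods (in days)
print(result['global_power'])  # global wavelet power spectrum (units**2)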
# (Assumes earlier in this source: from numpy import array_split and
# from multiprocessing import Queue.)
data = data.ravel()

# Gets the log-spaced frequencies.
numfreqs = 50
nyq = np.floor_divide(sample_rate[0], 2.0)
maxfreq = np.min([100, nyq])
minfreq = 2
freqs = np.logspace(np.log10(minfreq), np.log10(maxfreq), num=numfreqs)

# Makes an empty ndarray to hold the frequency * electrode * timepoint data.
powers_by_freq = np.zeros(shape=(len(freqs), og_shape[0], og_shape[1]))

# Convolve!
for i, freq in enumerate(freqs):
    wav_transform = wavelet.cwt(data, 1 / sample_rate[0],
                                freqs=np.full(1, freq),
                                wavelet=wavelet.Morlet(4))
    # Gets the power and reshapes the data back into the original shape.
    wav_transform = (np.abs(wav_transform[0]) ** 2).reshape(og_shape)
    powers_by_freq[i] = np.log(wav_transform)

# Preps some variables for the robust regression done in parallel.
xs = np.log(freqs).reshape(-1, 1)
midpoint = (np.log(maxfreq) - np.log(minfreq)) / 2
nworkers = int(config['nnodes'] * config['ppn'] * 0.5)

# Splits the power array into one chunk per worker along the timepoint axis.
chunk_indices = array_split(powers_by_freq, nworkers, axis=2)
mhq = Queue(nworkers)
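# ----------------------------------------------------------------------------
# Sketch of the robust fit each parallel worker presumably performs on its
# chunk: a straight-line fit of log-power against log-frequency at every
# electrode/timepoint. The worker itself lives elsewhere in this source, so
# the choice of scikit-learn's HuberRegressor here is an assumption for
# illustration only.
from sklearn.linear_model import HuberRegressor

def robust_loglog_fit(xs, ys):
    """Return (slope, intercept) of a robust log-log spectral fit."""
    model = HuberRegressor().fit(xs, ys)  # xs: (numfreqs, 1) log-frequencies
    return model.coef_[0], model.intercept_

# For example, for a single electrode/timepoint pair:
# slope, intercept = robust_loglog_fit(xs, powers_by_freq[:, elec, tp])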
dj = 1 / 12    # Twelve sub-octaves per octave.
s0 = -1        # Starting scale; -1 defaults to 2 * dt (six months here).
J = -1         # Number of scales; -1 defaults to 7 / dj powers of two.
# alpha = 0.0  # Lag-1 autocorrelation for white noise.
try:
    # Lag-1 autocorrelation for red noise.
    alpha, _, _ = wavelet.ar1(dat)
except Warning:
    # When the dataset is too short, or there is a strong trend, ar1 raises
    # a warning. In this case, we assume a white-noise background spectrum.
    alpha = 0.0

mother = wavelet.Morlet(6)  # Morlet mother wavelet with m=6.

# The following routines perform the wavelet transform and significance
# analysis for the chosen data set.
wave, scales, freqs, coi, fft, fftfreqs = wavelet.cwt(dat, ds.dt, dj, s0, J,
                                                      mother)
iwave = wavelet.icwt(wave, scales, ds.dt, dj, mother)

# Normalized wavelet and Fourier power spectra.
power = (numpy.abs(wave)) ** 2
fft_power = numpy.abs(fft) ** 2
period = 1 / freqs

# Significance test. Where the ratio power/sig95 > 1, power is significant.
signif, fft_theor = wavelet.significance(1.0, ds.dt, scales, 0, alpha,
                                         significance_level=slevel,
                                         wavelet=mother)
sig95 = numpy.ones([1, N]) * signif[:, None]
sig95 = power / sig95

# Power rectification as of Liu et al. (2007). TODO: confirm whether the
# significance test ratio should be calculated first.
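# A one-line sketch of the rectification step itself, mirroring the other
# snippets in this document (plot_tfr and graph_wavelet), which divide the
# power spectrum by the scales as proposed by Liu et al. (2007); whether
# sig95 should then be recomputed is exactly the open TODO above.
power /= scales[:, None]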
def graph_wavelet(data_xs, title, lims, font=11, params=default_params):
    a_lims, b_lims, d_lims = lims
    plt.rcParams.update({'font.size': font})
    return_data = {}
    N = len(data_xs)
    dt = (2 * params['per_pixel']) / N  # Spatial sampling interval (cm per
                                        # pixel).
    t = np.arange(0, N) * dt
    t = t - np.mean(t)
    t0 = 0
    per_min = params['min_per']
    per_max = params['max_per']
    units = params['units']
    sx = params['sx']
    octaves = params['octaves']
    dj = 1 / params['suboctaves']  # Sub-octaves per octave.
    order = params['order']

    var, std, dat_norm = detrend(data_xs)
    mother = cwt.DOG(order)  # The mother wavelet.
    s0 = sx * dt             # Starting scale: in our case two pixels, or
                             # 0.04 cm (40 um).
    J = octaves / dj         # Number of powers of two with dj sub-octaves.
    return_data['var'] = var
    return_data['std'] = std

    try:
        # Lag-1 autocorrelation for red noise.
        alpha, _, _ = cwt.ar1(dat_norm)
    except:
        alpha = 0.95

    wave, scales, freqs, coi, fft, fftfreqs = cwt.cwt(dat_norm, dt, dj, s0,
                                                      J, mother)
    return_data['scales'] = scales
    return_data['freqs'] = freqs
    return_data['fft'] = fft
    iwave = cwt.icwt(wave, scales, dt, dj, mother) * std

    power = (np.abs(wave)) ** 2
    fft_power = np.abs(fft) ** 2
    period = 1 / freqs
    power /= scales[:, None]  # Rectification suggested by Liu et al. (2007).

    # Significance of the power spectra: power is significant where
    # power / sig95 > 1.
    signif, fft_theor = cwt.significance(1.0, dt, scales, 0, alpha,
                                         significance_level=0.95,
                                         wavelet=mother)
    sig95 = np.ones([1, N]) * signif[:, None]
    sig95 = power / sig95

    glbl_power = power.mean(axis=1)
    dof = N - scales  # Correction for padding at the edges.
    glbl_signif, tmp = cwt.significance(var, dt, scales, 1, alpha,
                                        significance_level=0.95, dof=dof,
                                        wavelet=mother)

    # np.flatnonzero replaces the deprecated pylab.find.
    sel = np.flatnonzero((period >= per_min) & (period < per_max))
    Cdelta = mother.cdelta
    scale_avg = (scales * np.ones((N, 1))).transpose()
    scale_avg = power / scale_avg  # As in Torrence and Compo (1998) eq. 24.
    scale_avg = var * dj * dt / Cdelta * scale_avg[sel, :].sum(axis=0)
    scale_avg_signif, tmp = cwt.significance(var, dt, scales, 2, alpha,
                                             significance_level=0.95,
                                             dof=[scales[sel[0]],
                                                  scales[sel[-1]]],
                                             wavelet=mother)

    # Prepares the figure.
    plt.close('all')
    plt.ioff()
    figprops = dict(figsize=(11, 11), dpi=72)
    fig = plt.figure(**figprops)

    # Inset with the mother wavelet at a few of the analysed periods.
    wx = plt.axes([0.77, 0.75, 0.2, 0.2])
    imz = 0
    for idxy in range(0, len(period), 10):
        wx.plot(t, mother.psi(t / period[idxy]) + imz, linewidth=1.5)
        imz += 1
    wx.xaxis.set_ticklabels([])

    # First sub-plot, the original series, its inverse transform and the
    # detrended series.
    ax = plt.axes([0.1, 0.75, 0.65, 0.2])
    ax.plot(t, data_xs, 'k', linewidth=1.5)
    ax.plot(t, iwave, '-', linewidth=1, color=[0.5, 0.5, 0.5])
    ax.plot(t, dat_norm, '--', linewidth=1.5, color=[0.5, 0.5, 0.5])
    if a_lims is not None:
        ax.set_ylim([-a_lims, a_lims])
    ax.set_title('a) {}'.format(title))
    ax.set_ylabel(r'Displacement [{}]'.format(units))

    # Second sub-plot, the normalized wavelet power spectrum, significance
    # contour and cone of influence.
    bx = plt.axes([0.1, 0.37, 0.65, 0.28], sharex=ax)
    levels = [0.0625, 0.125, 0.25, 0.5, 1, 2, 4, 8, 16]
    bx.contourf(t, np.log2(period), np.log2(power), np.log2(levels),
                extend='both', cmap=plt.cm.viridis)
    extent = [t.min(), t.max(), 0, max(period)]
    bx.contour(t, np.log2(period), sig95, [-99, 1], colors='k',
               linewidths=2, extent=extent)
    bx.fill(np.concatenate([t, t[-1:] + dt, t[-1:] + dt, t[:1] - dt,
                            t[:1] - dt]),
            np.concatenate([np.log2(coi), [1e-9], np.log2(period[-1:]),
                            np.log2(period[-1:]), [1e-9]]),
            'k', alpha=0.3, hatch='x')
    bx.set_title('b) {} Octaves Wavelet Power Spectrum [{}({})]'.format(
        octaves, mother.name, order))
    bx.set_ylabel('Period (cm)')
    Yticks = 2 ** np.arange(np.ceil(np.log2(period.min())),
                            np.ceil(np.log2(period.max())))
    bx.set_yticks(np.log2(Yticks))
    bx.set_yticklabels(Yticks)

    # Third sub-plot, the global wavelet and Fourier power spectra and
    # theoretical noise spectra. Note that the period scale is logarithmic.
    cx = plt.axes([0.77, 0.37, 0.2, 0.28], sharey=bx)
    cx.plot(glbl_signif, np.log2(period), 'k--')
    cx.plot(var * fft_theor, np.log2(period), '--', color='#cccccc')
    cx.plot(var * fft_power, np.log2(1. / fftfreqs), '-', color='#cccccc',
            linewidth=1.)
    return_data['global_power'] = var * glbl_power
    return_data['fourier_spectra'] = var * fft_power
    return_data['per'] = np.log2(period)
    return_data['amp'] = np.log2(1. / fftfreqs)
    cx.plot(var * glbl_power, np.log2(period), 'k-', linewidth=1.5)
    cx.set_title('c) Power Spectrum')
    cx.set_xlabel(r'Power [({})^2]'.format(units))
    if b_lims is not None:
        cx.set_xlim([0, b_lims])
    cx.set_ylim(np.log2([period.min(), period.max()]))
    cx.set_yticks(np.log2(Yticks))
    cx.set_yticklabels(Yticks)
    return_data['yticks'] = Yticks
    plt.setp(cx.get_yticklabels(), visible=False)

    # Fourth sub-plot, the scale-averaged wavelet spectrum.
    dx = plt.axes([0.1, 0.07, 0.65, 0.2], sharex=ax)
    dx.axhline(scale_avg_signif, color='k', linestyle='--', linewidth=1.)
    dx.plot(t, scale_avg, 'k-', linewidth=1.5)
    dx.set_title('d) {}--{} cm scale-averaged power'.format(per_min,
                                                            per_max))
    dx.set_xlabel('Displacement (cm)')
    dx.set_ylabel(r'Average variance [{}]'.format(units))
    ax.set_xlim([t.min(), t.max()])
    if d_lims is not None:
        dx.set_ylim([0, d_lims])

    # A raw string avoids invalid escape sequences in the Windows path.
    plt.savefig(r"C:\pyscripts\wavelet_analysis\Calibrated Images\{}".format(
        title))
    return fig, return_data
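# ----------------------------------------------------------------------------
# Hypothetical call sketch for graph_wavelet. The params keys mirror those
# read inside the function; the profile data and parameter values are made
# up, and detrend/default_params are assumed to be defined earlier in this
# source.
import numpy as np

profile = np.cumsum(np.random.randn(1024))  # synthetic displacement profile
params = {'per_pixel': 0.02, 'min_per': 0.5, 'max_per': 4.0, 'units': 'cm',
          'sx': 2, 'octaves': 7, 'suboctaves': 12, 'order': 2}
fig, res = graph_wavelet(profile, 'Sample profile', lims=(None, None, None),
                         params=params)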
var = (var - var.mean()) / std    # Calculates the anomaly and normalizes it.

N = var.size                      # Number of measurements.
time = np.arange(0, N) * dt + t0  # Time array in years.

dj = 1 / 12    # Twelve sub-octaves per octave.
s0 = -1        # Starting scale; -1 defaults to 2 * dt (six months here).
J = -1         # Number of scales; -1 defaults to 7 / dj powers of two.
# alpha = 0.0  # Lag-1 autocorrelation for white noise.
alpha, _, _ = wavelet.ar1(var)  # Lag-1 autocorrelation for red noise.

mother = wavelet.Morlet(6)  # Morlet mother wavelet with m=6.

# The following routines perform the wavelet transform and significance
# analysis for the chosen data set.
wave, scales, freqs, coi, fft, fftfreqs = wavelet.cwt(var, dt, dj, s0, J,
                                                      mother)
iwave = wavelet.icwt(wave, scales, dt, dj, mother)

# Normalized wavelet and Fourier power spectra.
power = (np.abs(wave)) ** 2
fft_power = np.abs(fft) ** 2
period = 1 / freqs

# Significance test. Where the ratio power/sig95 > 1, power is significant.
signif, fft_theor = wavelet.significance(1.0, dt, scales, 0, alpha,
                                         significance_level=slevel,
                                         wavelet=mother)
sig95 = np.ones([1, N]) * signif[:, None]
sig95 = power / sig95

# Power rectification as of Liu et al. (2007). TODO: confirm whether the
# significance test ratio should be calculated first.
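# ----------------------------------------------------------------------------
# One possible resolution of the TODO above, sketched under the assumption
# that the significance ratio should be formed from the unrectified power:
# keep sig95 as computed and rectify only the spectrum used for plotting.
# This ordering is an editorial assumption, not a documented answer.
power_rect = power / scales[:, None]  # bias-rectified power, Liu et al. (2007)
# sig95 remains the ratio of unrectified power to the Torrence and Compo
# (1998) significance level.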