Example #1
    def setMotherWavelet(self, mother=wavelet.Morlet(6)):
        """
        Set the class of the mother wavelet.
        Note that in the pycwt module mother wavelets are not fully general
            (for example, the Morlet wavelet is available only for ω0 = 6).

        Parameters
        ==========
        mother: obj, optional (Morlet, Paul, DOG, MexicanHat)
            The mother wavelet instance.
            Default is a Morlet wavelet with ω0 = 6.

        Returns
        =======
        self

        Example
        =======
        import pycwt as wavelet

        cwt.setMotherWavelet(wavelet.Morlet(6))
        cwt.setMotherWavelet(wavelet.Paul())
        """
        self.feature.mother = mother
        return self
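A minimal, self-contained usage sketch; the surrounding class is not shown in this excerpt, so the CWTConfig container below is hypothetical:

import pycwt as wavelet

class _Feature:
    mother = None

class CWTConfig:
    """Hypothetical stand-in for the class that owns setMotherWavelet."""
    def __init__(self):
        self.feature = _Feature()

    def setMotherWavelet(self, mother=wavelet.Morlet(6)):
        self.feature.mother = mother
        return self

cwt = CWTConfig().setMotherWavelet(wavelet.Paul())
print(cwt.feature.mother.name)  # -> 'Paul'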
Example #2
def plot_tfr(times, freqs, data, mother=None):
    '''
    Plot the time-frequency representation of an analog signal,
    together with its FFT power estimate.

    Parameters
    ----------
    times : array
        Time axis of the signal.
    freqs : array
        Frequencies (in Hz) at which to evaluate the CWT.
    data : array
        The signal samples.
    mother : wavelet, optional
        Mother wavelet; defaults to pycwt.Morlet().
    '''
    import numpy
    import matplotlib.pyplot as plt
    from matplotlib import gridspec
    import pycwt

    if mother is None:
        mother = pycwt.Morlet()
    sampling_period = times[1] - times[0]

    wave, scales, freqs, coi, fft, fftfreqs = pycwt.cwt(
        data, sampling_period, freqs=freqs, wavelet=mother)

    power = (numpy.abs(wave)) ** 2
    # Rectify the power spectrum according to the suggestions of Liu et al. (2007)
    power /= scales[:, None]
    fft_power = numpy.abs(fft) ** 2

    gs = gridspec.GridSpec(3, 3)
    ax_pow = plt.subplot(gs[:2, 1:3])
    ax_pow.set_xlim(*times[[0,-1]])
    ax_pow.set_ylim(*freqs[[0,-1]])

    ax_fft = plt.subplot(gs[:2, 0], sharey=ax_pow)
    ax_sig = plt.subplot(gs[2, 1:3], sharex=ax_pow)

    ax_pow.contourf(times, freqs, power, levels=100)
    ax_sig.plot(times, data)
    ax_fft.plot(fft_power, fftfreqs)
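A quick way to exercise plot_tfr, assuming a synthetic 10 Hz tone (figure display is left to the caller):

import numpy as np
import matplotlib.pyplot as plt

times = np.arange(0, 2, 1e-3)           # 2 s sampled at 1 kHz
data = np.sin(2 * np.pi * 10 * times)   # 10 Hz tone
freqs = np.linspace(1, 50, 100)         # CWT frequency grid in Hz
plot_tfr(times, freqs, data)
plt.show()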
Example #3
    def __init__(self, signal, time, frequency=100e3, wavelet='Mexican'):
        """

        Parameters
        ----------
        signal : ndarray
            Signal to be analyzed
        time : ndarray
            Time basis
        frequency : float
            Fourier frequency for the analysis
        wavelet : :obj: `str`
            String indicating the type of wavelet used
            for the analysis. Default is 'Mexican'

        """

        if wavelet == 'Mexican':
            self.mother = wav.MexicanHat()
        elif wavelet == 'DOG1':
            self.mother = wav.DOG(m=1)
        elif wavelet == 'Morlet':
            self.mother = wav.Morlet()
        else:
            print('Not a valid wavelet, using Mexican')
            self.mother = wav.MexicanHat()
        # initialize the appropriate scale
        self.fr = frequency
        self.scale = 1. / self.mother.flambda() / self.fr
        self.sig = copy.deepcopy(signal)
        self.nsamp = signal.size
        self.time = copy.deepcopy(time)
        self.dt = (time.max() - time.min()) / (self.nsamp - 1)
        self.Fs = 1. / self.dt
        self.cwt()
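The scale set above follows the standard Fourier-wavelength relation scale = 1 / (flambda() * frequency), so the chosen Fourier frequency can be recovered from the scale; a quick check:

import pycwt as wav

mother = wav.MexicanHat()
frequency = 100e3                           # analysis frequency in Hz
scale = 1. / mother.flambda() / frequency   # same formula as in __init__
print(1. / (scale * mother.flambda()))      # ≈ 100000.0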
Example #4
def full_time_freq_coupling(timeseries,
                            num_nodes,
                            dt,
                            high_f=0.11,
                            low_f=0.005,
                            xWT_freq_threshold=0.005,
                            coi_freq_threshold=0.005,
                            num_sub_octaves=12,
                            verbose=True):
    # Morlet wavelet with omega_0 = 6, which sets the time-frequency resolution
    # of the wavelet and is the value recommended by Chang and Glover.
    mother = pycwt.Morlet(6)
    s0 = 1 / high_f
    dj = 1 / num_sub_octaves
    num_powers = 0
    while ((1 / high_f) * (2**num_powers) < 1 / low_f):
        num_powers += 1
    J = int(num_powers / dj)

    y1 = timeseries[:, 0]
    y2 = timeseries[:, 1]
    xWT, coi, freq = cross_wavelet_transform(y1,
                                             y2,
                                             dt,
                                             dj=dj,
                                             s0=s0,
                                             J=J,
                                             wavelet=mother,
                                             normalize=True)
    xWT = xWT[freq >= xWT_freq_threshold, :]
    coi_xWT = restrict_time_to_coi(
        xWT, coi, coi_freq_threshold
    )  # restrict timepoints to only where min freq is met
    full_coupling = np.zeros([
        int((num_nodes**2 - num_nodes) / 2), coi_xWT.shape[0], coi_xWT.shape[1]
    ],
                             dtype='complex128')

    i = 0
    for roi1 in range(num_nodes):
        for roi2 in range(roi1):
            y1 = timeseries[:, roi1]
            y2 = timeseries[:, roi2]
            xWT, coi, freq = cross_wavelet_transform(y1,
                                                     y2,
                                                     dt,
                                                     dj=dj,
                                                     s0=s0,
                                                     J=J,
                                                     wavelet=mother,
                                                     normalize=True)
            xWT = xWT[freq >= xWT_freq_threshold, :]
            coi_xWT = restrict_time_to_coi(
                xWT, coi, coi_freq_threshold
            )  # restrict timepoints to only where min freq is met
            full_coupling[i, :, :] = coi_xWT
            if verbose:
                print('Edge # ' + str(i))
            i += 1
    return full_coupling
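The s0/dj/J bookkeeping above builds a scale ladder spanning the band [low_f, high_f]; a small standalone check of that arithmetic with the default arguments:

import numpy as np

high_f, low_f, num_sub_octaves = 0.11, 0.005, 12
s0 = 1 / high_f                       # smallest scale = shortest period of interest
dj = 1 / num_sub_octaves
num_powers = 0
while (1 / high_f) * (2**num_powers) < 1 / low_f:
    num_powers += 1                   # whole octaves needed to reach 1 / low_f
J = int(num_powers / dj)              # number of sub-octave scale steps
scales = s0 * 2**(np.arange(J + 1) * dj)
print(num_powers, J, 1 / scales[-1] <= low_f)   # -> 5 60 True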
Example #5
def calculate_power_wavelet(rr_intervals, heart_rate=4, mother_wave='morlet'):
    """
    Method to calculate the spectral power using wavelet method.

    Parameters
    ----------
    rr_intervals: array-like
        List of RR intervals (in ms)
    heart_rate: int
        Resampling rate (in Hz); should be at least twice the highest
        heart-rate frequency of interest
    mother_wave: str
        The mother wavelet used to transform the data.
        Available waves are 'gaussian', 'paul' and 'mexican_hat';
        any other value falls back to the Morlet wavelet.

    Returns
    -------
    freqs : ndarray
        Frequencies of the corresponding power values.
    powers : ndarray
        Wavelet power of the signal at each frequency and time point.
    """
    dt = 1 / heart_rate
    if mother_wave in mother_wave_dict:
        mother_morlet = mother_wave_dict[mother_wave]
    else:
        mother_morlet = wavelet.Morlet()

    wave, scales, freqs, coi, fft, fftfreqs = \
        wavelet.cwt(rr_intervals, dt, wavelet=mother_morlet)
    powers = (np.abs(wave))**2
    return freqs, powers
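mother_wave_dict is defined elsewhere in the source module; a plausible minimal definition using pycwt's mother-wavelet classes (the exact mapping is an assumption based on the docstring):

import pycwt as wavelet

mother_wave_dict = {
    'gaussian': wavelet.DOG(m=2),     # DOG(2) is the Gaussian second derivative (Mexican hat)
    'paul': wavelet.Paul(),
    'mexican_hat': wavelet.MexicanHat(),
}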
Example #6
def calculateCWT(t, s, steps=32):
    mother = wavelet.Morlet(6)
    deltaT = t[1] - t[0]
    dj = 1 / steps        # sub-octaves per octave
    s0 = 2 * deltaT       # starting scale: twice the sampling interval
    wave, scales, freqs, coi, fft, fftfreqs = wavelet.cwt(s, deltaT, dj, s0, -1, mother)
    # Normalized wavelet power spectra
    power = (np.abs(wave)) ** 2
    return power, scales, coi, freqs
Example #7
import numpy as np
import scipy.signal as sp
import torch
import pycwt as wavelet


def wavelet_decompose_power_spectrum(signal, wl=None,
                                     resample=None,
                                     resample_freq=None,
                                     sampling_frequency=None,
                                     filter_frequency=40,
                                     dt=1):
    """
    :param signal: The signal, a numpy array or PyTorch Tensor of shape (N,)
    :param wl: Provided Wavelet (see pycwt documentation for available wavelets)
    :param resample: Downsample factor for signal time series.
    :param resample_freq: Downsample factor for wavelet frequency plane.
    :param sampling_frequency: Sampling frequency to be used by the butterworth filter, if provided.
    :param filter_frequency: Filter frequency for the butterworth filter
    :param dt: Sampling interval Sampling interval for the continuous wavelet transform.
    :return: Resampled time series, Resamples frequency series, power spectrum of shape (Frequencies, Time),
    Original signal.
    """
    if resample is not None:
        signal = sp.resample(signal, signal.shape[0] // resample)

    if isinstance(signal, torch.Tensor):
        signal = signal.numpy()

    # Butterworth filter
    if sampling_frequency is not None:
        sos = sp.butter(5, filter_frequency, 'low', fs=sampling_frequency, output='sos')
        signal = sp.sosfilt(sos, signal)

    time = np.arange(signal.shape[0])

    # p = np.polyfit(time, signal, 1)
    # dat_notrend = signal - np.polyval(p, time)
    # std = dat_notrend.std()  # Standard deviation
    # dat_norm = dat_notrend / std  # Normalized dataset

    if wl is None:
        wl = wavelet.Morlet(6)

    wave, scales, freqs, coi, fft, fftfreqs = wavelet.cwt(signal, dt, wavelet=wl)
    power = (np.abs(wave)) ** 2

    power /= scales[:, None]

    if resample_freq is not None:
        power = sp.resample(power, num=resample_freq, axis=0)
        freqs = sp.resample(freqs, num=resample_freq)

    return time, np.array(freqs), power, signal
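Example usage, assuming a 6 Hz tone sampled at 100 Hz (dt is passed in seconds):

import numpy as np

fs = 100.0
sig = np.sin(2 * np.pi * 6 * np.arange(0, 5, 1 / fs))
t, f, p, s = wavelet_decompose_power_spectrum(sig, dt=1 / fs)
print(p.shape)                         # (frequencies, time)
print(f[np.argmax(p.mean(axis=1))])    # peak should land near 6 Hz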
Example #8
def continuous_wavelet_transform(data):
    """ Written using the tutorial at https://pycwt.readthedocs.io/en/latest/tutorial.html"""

    dt = 0.25
    dj = 1 / 12
    dat = (data - data.mean()) / data.std()
    s0 = 2 * dt
    J = 7 / dj
    mother = wavelet.Morlet(6)

    wave, scales, freqs, coi, fft, fftfreqs = wavelet.cwt(
        dat, dt, dj, s0, J, mother)

    power = (np.abs(wave))**2
    power = np.log2(power)
    return power
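The hard-coded dt = 0.25 means the input is assumed to be sampled on a quarter-unit grid; a synthetic usage sketch:

import numpy as np

t = np.arange(0, 256) * 0.25
data = np.sin(2 * np.pi * t / 8.0) + 0.1 * np.random.randn(t.size)
log_power = continuous_wavelet_transform(data)
print(log_power.shape)    # (scales, time)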
Example #9
def wavelet(data, sampling_rate, f_start, f_stop, f_step=1, morlet=20):
    mother = pycwt.Morlet(morlet)  # Morlet central frequency ω0
    freqs = np.arange(f_start, f_stop + f_step, f_step)  # Frequency range

    wave, scales, freqs, coi, fft, fftfreqs = pycwt.cwt(data,
                                                        1. / sampling_rate,
                                                        freqs=freqs,
                                                        wavelet=mother)

    power = (np.abs(wave))**2
    # Rectify the power spectrum according to the suggestions of Liu et al. (2007)
    power /= scales[:, None]

    mask_coi(power, freqs, coi)

    return freqs, power
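mask_coi is not shown in this excerpt; a minimal in-place sketch, assuming it blanks power values below the cone of influence (pycwt returns coi as the maximum reliable period at each time step):

import numpy as np

def mask_coi(power, freqs, coi):
    """Hypothetical helper: NaN out power where the period exceeds the COI."""
    periods = 1.0 / freqs
    for i, max_period in enumerate(coi):
        power[periods > max_period, i] = np.nan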
Example #10
def xwt(trace_ref, trace_current, fs, ns, nt, vpo, freqmin, freqmax, nptsfreq):
    # Choosing a Morlet wavelet with a central frequency w0 = 6
    mother = wavelet.Morlet(6.)
    # nx represent the number of element in the trace_current array
    nx = np.size(trace_current)
    x_reference = np.transpose(trace_ref)
    x_current = np.transpose(trace_current)
    # Sampling interval
    dt = 1 / fs
    # Spacing between discrete scales, the default value is 1/12
    dj = 1 / vpo
    # Number of scales less one; -1 selects the default J = (log2(N * dt / s0)) / dj.
    J = -1
    # Smallest scale of the wavelet; the default value is 2 * dt
    s0 = 2 * dt

    # Creation of the frequency vector that we will use in the continuous wavelet transform
    freqlim = np.linspace(freqmax, freqmin, num=nptsfreq, endpoint=True, retstep=False, dtype=None, axis=0)

    # Calculation of the two wavelet transform independently
    # scales are calculated using the wavelet Fourier wavelength
    # fft : Normalized fast Fourier transform of the input trace
    # fftfreqs : Fourier frequencies for the calculated FFT spectrum.
    cwt_reference, scales, freqs, coi, fft, fftfreqs = wavelet.cwt(x_reference, dt, dj, s0, J, mother, freqs=freqlim)
    cwt_current, _, _, _, _, _ = wavelet.cwt(x_current, dt, dj, s0, J, mother, freqs=freqlim)

    scales = np.array([[kk] for kk in scales])
    invscales = np.kron(np.ones((1, nx)), 1 / scales)
    cfs1 = smoothCFS(invscales * abs(cwt_reference) ** 2, scales, dt, ns, nt)
    cfs2 = smoothCFS(invscales * abs(cwt_current) ** 2, scales, dt, ns, nt)
    crossCFS = cwt_reference * np.conj(cwt_current)
    WXamp = abs(crossCFS)
    # cross-wavelet transform operation with smoothing
    crossCFS = smoothCFS(invscales * crossCFS, scales, dt, ns, nt)
    WXspec = crossCFS / (np.sqrt(cfs1) * np.sqrt(cfs2))
    WXangle = np.angle(WXspec)
    Wcoh = abs(crossCFS) ** 2 / (cfs1 * cfs2)
    pp = 2 * np.pi * freqs
    pp2 = np.array([[kk] for kk in pp])
    WXdt = WXangle / np.kron(np.ones((1, nx)), pp2)


    return WXamp, WXspec, WXangle, Wcoh, WXdt, freqs, coi
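smoothCFS is defined elsewhere (this xwt follows the usual wavelet-coherence recipe, with smoothing in time and across scales); a crude stand-in, purely to make the excerpt self-contained. The real implementation is likely a scale-dependent filter rather than the plain boxcars assumed here:

import numpy as np
from scipy.ndimage import uniform_filter1d

def smoothCFS(cfs, scales, dt, ns, nt):
    """Hypothetical smoother: ns-point boxcar along time, nt-point across scales.
    scales and dt are unused in this simplified version."""
    def box(a):
        a = uniform_filter1d(a, size=max(int(ns), 1), axis=1, mode='nearest')
        return uniform_filter1d(a, size=max(int(nt), 1), axis=0, mode='nearest')
    if np.iscomplexobj(cfs):
        return box(cfs.real) + 1j * box(cfs.imag)
    return box(cfs)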
Example #11
def continuous_wavelet_transform(data,
                                 octave_exponent=10,
                                 sub_octaves=25,
                                 starting_scale=2,
                                 dt=1):
    """ Generate a continuous wavelet transform using pycwt
	"""
    std = data.std()  # Standard deviation
    dat = (data - data.mean()) / std  # Calculating anomaly and normalizing
    dj = 1 / sub_octaves  # sub-octaves per octave
    s0 = starting_scale  # Starting scale
    J = octave_exponent / dj  # x powers of two with dj sub-octaves
    mother = wavelet.Morlet(6)  # Morlet mother wavelet with m=6

    # The following routines perform the wavelet transform and significance
    # analysis for the chosen data set.
    wave, scales, freqs, coi, fft, fftfreqs = wavelet.cwt(
        dat, dt, dj, s0, J, mother)
    #print('wave.shape', wave.shape)

    # Normalized wavelet and Fourier power spectra
    wave = (np.abs(wave))**2

    # Resize:
    averaging_window = 160
    #print('before reshape', wave.shape[1])
    height = wave.shape[0]
    rounded_shape = wave.shape[1] - wave.shape[1] % averaging_window
    #print(rounded_shape)
    wave = wave[:, :rounded_shape].reshape(-1, averaging_window).mean(
        axis=1).reshape(height, int(rounded_shape / averaging_window))
    #print('after reshape', wave.shape)

    # Normalize to (0, 1)
    # wave = (wave - np.min(wave)) / (np.max(wave) - np.min(wave))

    # Vertical Flip
    # wave = np.flipud(wave)

    # Logarithm
    wave = np.log2(wave)

    return wave
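Usage sketch: the 160-sample averaging window means the input should be at least a few hundred samples long; for instance, 3200 samples yield 20 time bins:

import numpy as np

data = np.sin(2 * np.pi * np.arange(3200) / 64.0) + 0.1 * np.random.randn(3200)
log_wave = continuous_wavelet_transform(data)
print(log_wave.shape)    # (scales, 20)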
Example #12
def plot_spectrum(sig, wavename, casename, t1, t2, fs=1 / 60):
    """
    Plot the spectrogram from a given signal, timeframes and frequency
    
    :param sig: The processed signal to plot
    :param wavename: The name of the wavelet
    :param casename: The name of the analysed column
    :param t1: The starting timeframe
    :param t2: The ending timeframe
    :param fs: The frequency, defaults to 1/60
    :return: Plots the spectrogram saving the results to a file
    """
    T = np.array(range(t1, t2))
    dat = sig[T]
    dt = 1 / fs
    t = T / fs / 60 / 60
    dat_norm = dat / dat.std()  # Normalized dataset
    mother = wavelet.Morlet(6)
    s0 = 2 * dt
    dj = 1 / 12
    J = 7 / dj
    wave, scales, freqs, coi, fft, fftfreqs = wavelet.cwt(
        dat_norm, dt, dj, s0, J, mother)
    power = (np.abs(wave))**2
    period = 1 / freqs / 60 / 60

    plt.figure(1, figsize=(6.4, 4.8))
    bx = plt.axes([0.1, 0.37, 0.65, 0.28])
    levels = [0.0625, 0.125, 0.25, 0.5, 1, 2, 4, 8, 16]
    bx.contourf(t,
                period,
                power,
                np.log2(levels),
                extend='both',
                cmap=plt.cm.prism)
    bx.set_ylabel('Period (hours)')
    bx.set_xlabel('Time (hours)')
    plt.savefig('./output/{}_2.png'.format(casename), dpi=300)
    plt.close('all')
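Usage sketch, assuming one week of minute-resolution data with a daily oscillation (the function writes to ./output/, which must already exist):

import numpy as np

n = 7 * 24 * 60                               # one sample per minute for a week
sig = np.sin(2 * np.pi * np.arange(n) / (24 * 60)) + 0.1 * np.random.randn(n)
plot_spectrum(sig, 'morlet', 'demo', t1=0, t2=n)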
Example #13
N = dat.size                            # Number of measurements
time = numpy.arange(0, N) * ds.dt + ds.t0  # Time array in years

dj = 1 / 12                          # Twelve sub-octaves per octaves
s0 = -1  # 2 * dt                    # Starting scale, here 6 months
J = -1  # 7 / dj                     # Seven powers of two with dj sub-octaves
#  alpha = 0.0                       # Lag-1 autocorrelation for white noise
try:
    alpha, _, _ = wavelet.ar1(dat)   # Lag-1 autocorrelation for red noise
except Warning:
    # When the dataset is too short, or there is a strong trend, ar1 raises a
    # warning. In this case, we assume a white noise background spectrum.
    alpha = 1.0

mother = wavelet.Morlet(6)           # Morlet mother wavelet with m=6

# The following routines perform the wavelet transform and significance
# analysis for the chosen data set.
wave, scales, freqs, coi, fft, fftfreqs = wavelet.cwt(dat, ds.dt, dj, s0, J,
                                                      mother)
iwave = wavelet.icwt(wave, scales, ds.dt, dj, mother)

# Normalized wavelet and Fourier power spectra
power = (numpy.abs(wave)) ** 2
fft_power = numpy.abs(fft) ** 2
period = 1 / freqs

# Significance test. Where ratio power/sig95 > 1, power is significant.
signif, fft_theor = wavelet.significance(1.0, ds.dt, scales, 0, alpha,
                                         significance_level=slevel,
Example #14
# num_steps = 512
# x = np.arange(num_steps)
# y = np.sin(2*np.pi*x/300)

# delta_t = x[1] - x[0]
# scales = np.arange(1,num_steps+1)
# wavelet_type = 'morl'
# coefs, freqs = pywt.cwt(y, scales, wavelet_type, delta_t)
# plt.matshow(coefs)
# plt.show()

import pycwt as wavelet
import numpy as np
import matplotlib.pyplot as plt

num_steps = 1000
x = np.arange(num_steps)
y = np.sin(2 * np.pi * x / 200) + np.sin(2 * np.pi * x / 20)

delta_t = x[1] - x[0]
scales = np.arange(1, num_steps + 1)
freqs = 1 / (wavelet.Morlet().flambda() * scales)
wavelet_type = 'morlet'
print(freqs)
coefs, scales, freqs, coi, fft, fftfreqs = wavelet.cwt(y,
                                                       delta_t,
                                                       wavelet=wavelet_type,
                                                       freqs=freqs)
print(freqs)
plt.matshow(abs(coefs)**2)
plt.show()
Example #15
delta = freqs[0:8]
theta = freqs[8:17]
alpha = freqs[17:22]
beta = freqs[22:33]
lgamma = freqs[33:42]
hgamma = freqs[42:50]

bands = [delta, theta, alpha, beta, lgamma, hgamma]



peak_deviations = np.zeros(shape=bo.data.T.shape)

for i, band in enumerate(bands):
    for electrode in range(0, len(bo.data.T)):
        wav_transform, sj, wavelet_freqs, coi, fft, fftfreqs = wavelet.cwt(bo.data[electrode], 1/bo.sample_rate[0], freqs = band, wavelet=wavelet.Morlet(4))
        raw_power = np.square(np.abs(wav_transform))
        avg_power = np.average(raw_power, axis=0)
        log_power = np.log(avg_power)
        log_freqs = np.log(band)
        HR = sklearn.linear_model.HuberRegressor()
       # pdb.set_trace()
        HR.fit(log_freqs.reshape(-1,1), log_power)
        narrowband_power = log_power - (log_freqs * HR.coef_[0] + HR.intercept_)
        peak_deviations[electrode] = narrowband_power

    deviation_bo = se.Brain(data=peak_deviations.T, locs=bo.locs, sample_rate=bo.sample_rate, filter=None)
    deviation_bo.save('peakdev_band_' + str(i) + '_' + fname)

# except:
#     print('.bo file not found')
Example #16
import numpy
import pycwt as wavelet
from pycwt.helpers import find
from matplotlib import pyplot


def main():
    # Then, we load the dataset and define some data related parameters. In this
    # case, the first 19 lines of the data file contain meta-data that we ignore,
    # since we set them manually (*i.e.* title, units).
    url = 'http://paos.colorado.edu/research/wavelets/wave_idl/nino3sst.txt'
    dat = numpy.genfromtxt(url, skip_header=19)
    title = 'NINO3 Sea Surface Temperature'
    label = 'NINO3 SST'
    units = 'degC'
    t0 = 1871.0
    dt = 0.25  # In years

#%%
    
    # We also create a time array in years.
    N = dat.size
    t = numpy.arange(0, N) * dt + t0
#%%
    # We write the following code to detrend and normalize the input data by its
    # standard deviation. Sometimes detrending is not necessary and simply
    # removing the mean value is good enough. However, if your dataset has a well
    # defined trend, such as the Mauna Loa CO\ :sub:`2` dataset available in the
    # above mentioned website, it is strongly advised to perform detrending.
    # Here, we fit a one-degree polynomial function and then subtract it from the
    # original data.
    p = numpy.polyfit(t - t0, dat, 1)
    dat_notrend = dat - numpy.polyval(p, t - t0)
    std = dat_notrend.std()  # Standard deviation
    var = std ** 2  # Variance
    dat_norm = dat_notrend / std  # Normalized dataset
#%%   
    # The next step is to define some parameters of our wavelet analysis. We
    # select the mother wavelet, in this case the Morlet wavelet with
    # :math:`\omega_0=6`.
    mother = wavelet.Morlet(6)
    s0 = 2 * dt  # Starting scale, in this case 2 * 0.25 years = 6 months
    dj = 1 / 12  # Twelve sub-octaves per octaves
    J = 7 / dj  # Seven powers of two with dj sub-octaves
    alpha, _, _ = wavelet.ar1(dat)  # Lag-1 autocorrelation for red noise

#%%
    
    # The following routines perform the wavelet transform and inverse wavelet
    # transform using the parameters defined above. Since we have normalized our
    # input time-series, we multiply the inverse transform by the standard
    # deviation.
    wave, scales, freqs, coi, fft, fftfreqs = wavelet.cwt(dat_norm, dt, dj, s0,
                                                          J, mother)
    iwave = wavelet.icwt(wave, scales, dt, dj, mother) * std

#%%
    # We calculate the normalized wavelet and Fourier power spectra, as well as
    # the Fourier equivalent periods for each wavelet scale.
    power = (numpy.abs(wave)) ** 2
    fft_power = numpy.abs(fft) ** 2
    period = 1 / freqs
    
#%%
    
    # We could stop at this point and plot our results. However we are also
    # interested in the power spectra significance test. The power is significant
    # where the ratio ``power / sig95 > 1``.
    signif, fft_theor = wavelet.significance(1.0, dt, scales, 0, alpha,
                                             significance_level=0.95,
                                             wavelet=mother)
    sig95 = numpy.ones([1, N]) * signif[:, None]
    sig95 = power / sig95

#%%
    
    # Then, we calculate the global wavelet spectrum and determine its
    # significance level.
    glbl_power = power.mean(axis=1)
    dof = N - scales  # Correction for padding at edges
    glbl_signif, tmp = wavelet.significance(var, dt, scales, 1, alpha,
                                            significance_level=0.95, dof=dof,
                                            wavelet=mother)
    
#%%
    
    # We also calculate the scale average between 2 years and 8 years, and its
    # significance level.
    sel = find((period >= 2) & (period < 8))
    Cdelta = mother.cdelta
    scale_avg = (scales * numpy.ones((N, 1))).transpose()
    scale_avg = power / scale_avg  # As in Torrence and Compo (1998) equation 24
    scale_avg = var * dj * dt / Cdelta * scale_avg[sel, :].sum(axis=0)
    scale_avg_signif, tmp = wavelet.significance(var, dt, scales, 2, alpha,
                                                 significance_level=0.95,
                                                 dof=[scales[sel[0]],
                                                      scales[sel[-1]]],
                                                 wavelet=mother)

#%%
    
    # Finally, we plot our results in four different subplots containing the
    # (i) original series anomaly and the inverse wavelet transform; (ii) the
    # wavelet power spectrum; (iii) the global wavelet and Fourier spectra; and
    # (iv) the range-averaged wavelet spectrum. In all sub-plots the significance
    # levels are either included as dotted lines or as filled contour lines.

    # Prepare the figure
    pyplot.close('all')
    pyplot.ioff()
    figprops = dict(figsize=(11, 8), dpi=72)
    fig = pyplot.figure(**figprops)
    
#%%
    
    # First sub-plot, the original time series anomaly and inverse wavelet
    # transform.
    ax = pyplot.axes([0.1, 0.75, 0.65, 0.2])
    ax.plot(t, iwave, '-', linewidth=1, color=[0.5, 0.5, 0.5])
    ax.plot(t, dat, 'k', linewidth=1.5)
    ax.set_title('a) {}'.format(title))
    ax.set_ylabel(r'{} [{}]'.format(label, units))
Example #17
        eta[ii] = np.sin(2 * np.pi * ii / 10) / 2

# HW8-2
"""
for ii in i:
    eta[ii] = np.sin(2 * np.pi * ii / 20) + np.sin(2 * np.pi * ii / 10) / 2
"""
fig, sub = plt.subplots(figsize=(10, 4))
sub.plot(i, eta, ls="-", c="k", lw=1)
sub.set_xlim(min(i), max(i))
sub.set_xticks(np.arange(0, 200 + 20, 20))
sub.set_xlabel("i", fontdict={"weight": "bold"})
sub.set_ylabel("Eta", fontdict={"weight": "bold"})
sub.set_title("Data", fontdict={"weight": "bold"})

mother = wavelet.Morlet(f0=6)
alpha, _, _ = wavelet.ar1(eta)
dj = 0.25
s0 = 2 * dt
J = 7 / dj

wave, scales, freqs, coi, fft, fftfreqs = wavelet.cwt(signal=eta, dt=dt,
                                                      dj=dj, s0=s0, J=J,
                                                      wavelet=mother)

power = np.abs(wave)**2
fft_power = np.abs(fft)**2
period = 1 / freqs

signif, fft_theor = wavelet.significance(signal=1.0, dt=dt,
                                         scales=scales, alpha=alpha,
Example #18
import numpy
import pycwt as wavelet
from pycwt.helpers import find
from matplotlib import pyplot


def get_graph_from_file(in_filepath, out_folder, out_filename):
    # Get data
    # TODO: there are different file formats;
    # implement different parsers selected by function parameters
    p1 = numpy.genfromtxt(in_filepath)

    # TODO: clean this up
    dat = p1

    title = 'NINO3 Sea Surface Temperature'
    label = 'NINO3 SST'
    units = 'degC'

    # Values for calculations
    # TODO: document these arguments
    t0 = 12.0  # start time
    dt = 0.5  # sampling interval, in minutes

    N = dat.size
    t = numpy.arange(0, N) * dt + t0

    p = numpy.polyfit(t - t0, dat, 1)
    dat_notrend = dat - numpy.polyval(p, t - t0)
    std = dat_notrend.std()  # Standard deviation
    var = std**2  # Variance
    dat_norm = dat_notrend / std  # Normalized dataset

    mother = wavelet.Morlet(6)
    s0 = 2 * dt  # Starting scale, in this case 2 * 0.5 = 1 minute
    dj = 1 / 12  # Twelve sub-octaves per octaves
    J = 7 / dj  # Seven powers of two with dj sub-octaves
    alpha, _, _ = wavelet.ar1(dat)  # Lag-1 autocorrelation for red noise

    wave, scales, freqs, coi, fft, fftfreqs = wavelet.cwt(
        dat_norm, dt, dj, s0, J, mother)
    iwave = wavelet.icwt(wave, scales, dt, dj, mother) * std

    power = (numpy.abs(wave))**2
    fft_power = numpy.abs(fft)**2
    period = 1 / freqs

    power /= scales[:, None]

    signif, fft_theor = wavelet.significance(1.0,
                                             dt,
                                             scales,
                                             0,
                                             alpha,
                                             significance_level=0.95,
                                             wavelet=mother)
    sig95 = numpy.ones([1, N]) * signif[:, None]
    sig95 = power / sig95

    glbl_power = power.mean(axis=1)
    dof = N - scales  # Correction for padding at edges
    glbl_signif, tmp = wavelet.significance(var,
                                            dt,
                                            scales,
                                            1,
                                            alpha,
                                            significance_level=0.95,
                                            dof=dof,
                                            wavelet=mother)

    sel = find((period >= 2) & (period < 8))
    Cdelta = mother.cdelta
    scale_avg = (scales * numpy.ones((N, 1))).transpose()
    scale_avg = power / scale_avg  # As in Torrence and Compo (1998) equation 24
    scale_avg = var * dj * dt / Cdelta * scale_avg[sel, :].sum(axis=0)
    scale_avg_signif, tmp = wavelet.significance(
        var,
        dt,
        scales,
        2,
        alpha,
        significance_level=0.95,
        dof=[scales[sel[0]], scales[sel[-1]]],
        wavelet=mother)

    # Prepare the figure
    pyplot.close('all')
    #pyplot.ioff()
    figprops = dict(dpi=144)
    fig = pyplot.figure(**figprops)

    # Second sub-plot, the normalized wavelet power spectrum and significance
    # level contour lines and cone of influence hatched area. Note that period
    # scale is logarithmic.
    bx = pyplot.axes([0.1, 0.37, 0.65, 0.28])
    levels = [0.0625, 0.125, 0.25, 0.5, 1, 2, 4, 8, 16]
    bx.contourf(t,
                period,
                numpy.log2(power),
                numpy.log2(levels),
                extend='both',
                cmap=pyplot.cm.viridis)
    extent = [t.min(), t.max(), 0, max(period)]
    bx.contour(t,
               period,
               sig95, [-99, 1],
               colors='k',
               linewidths=2,
               extent=extent)
    bx.set_title('{} Wavelet Power Spectrum ({})'.format(label, mother.name))
    bx.set_ylabel('Period (minutes)')
    #
    #Yticks = 2 ** numpy.arange(numpy.ceil(numpy.log2(period.min())),
    #                        numpy.ceil(numpy.log2(period.max())))
    #bx.set_yticks(numpy.log2(Yticks))
    #bx.set_yticklabels(Yticks)
    bx.set_ylim([2, 20])

    # Save graph to file
    # TODO implement
    #pyplot.savefig('{}/{}.png'.format(out_folder, out_filename))
    # ----------------------------------------------
    # or show the graph
    pyplot.show()
Example #19
def cwt_analysis(params, mother_name="mexican_hat", num_scales=12,
                 first_scale=None, first_freq=None, scale_distance=1.0,
                 apply_coi=True, period=5, frame_rate=200):
    """Perform the continuous wavelet analysis of the given parameters

    Parameters
    ----------
    params: arraylike
        The parameters to analyze.
    mother_name: string, optional
        The name of the mother wavelet [default: mexican_hat].
    num_scales: int, optional
        The number of scales [default: 12].
    first_scale: int, optional
        The width of the shortest scale
    first_freq: int, optional
        The highest frequency in Hz
    scale_distance: float, optional
        The distance between scales [default: 1.0].
    apply_coi: boolean, optional
        Apply the Cone Of Influence (coi)
    period: int, optional
        The period of the mother wavelet [default: 5].
    frame_rate: int, optional
        The signal frame rate [default: 200].

    Returns
    -------
    wavelet_matrix: ndarray
        The wavelet data resulting from the analysis
    scales: arraylike
        The scale indices corresponding to the wavelet data
    """
    # setup wavelet transform
   
    dt = 1. /float(frame_rate)  # frame length

    if not first_scale:
        first_scale = dt # first scale, here frame length
    
    if first_freq:
        first_scale = _freq2scale(first_freq, mother_name, period)
        
    dj = scale_distance  # distance between scales in octaves
    J =  num_scales #  number of scales

    mother = cwt.MexicanHat()

    if mother_name.lower() == "morlet":
        mother = cwt.Morlet(period)
    elif mother_name.lower() == "paul":
        mother = cwt.Paul(period)

    wavelet_matrix, scales, freqs, coi, fft, fftfreqs = _padded_cwt(params, dt, dj, first_scale, J,mother, 400)
    #wavelet_matrix, scales, freqs, coi, fft, fftfreqs = cwt.cwt(f0_mean_sub, dt, dj, s0, J,mother)

    #wavelet_matrix = abs(wavelet_matrix)
    wavelet_matrix = _scale_for_reconstruction((wavelet_matrix), scales, dj, dt,mother=mother_name,period=period)

    if apply_coi:
        #wavelet_matrix = _zero_outside_coi(wavelet_matrix, scales/dt*0.5)
        wavelet_matrix = _zero_outside_coi(wavelet_matrix, freqs, frame_rate)
    import numpy as np
    np.set_printoptions(precision=3, suppress=True)
    return (wavelet_matrix,scales,freqs)
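_padded_cwt and the other leading-underscore helpers are not part of this excerpt; a minimal sketch of _padded_cwt, assuming it mirror-pads the signal to soften edge effects, runs pycwt's transform, and crops the pad off again:

import numpy as np
import pycwt

def _padded_cwt(params, dt, dj, s0, J, mother, padding_len):
    """Hypothetical: CWT with symmetric padding to reduce edge artifacts."""
    padded = np.pad(params, padding_len, mode='reflect')
    wave, scales, freqs, coi, fft, fftfreqs = pycwt.cwt(padded, dt, dj, s0,
                                                        J, mother)
    wave = wave[:, padding_len:-padding_len]   # crop back to the original length
    coi = coi[padding_len:-padding_len]
    return wave, scales, freqs, coi, fft, fftfreqs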
Example #20
# Run parameters
maxdim = 1
truncDim = 0
TR = 1.5
knn = 333 - 5

# adjust data stack
buffLen = int(450)
mixLen = int(300)
trimLen = 120
trimLen0 = int(350)
trimLen1 = 120
trimEdges = [124, 894]

# cwt parameters
mother = pycwt.Morlet(6)
s0 = 6.5
dj = .32
J = 14
s1 = s0 * 2**(J * dj)

frq_edges = [2, 13]

#What metrics to pursue
metrics = ['simplex']

# thresholds
TARGS = np.array((0.05, 0.1, 0.15, 0.2))
TARGS_CEIL = 1

# for persistence landscape
Example #21
    def limStructure(self,
                     frequency=100e3,
                     wavelet='Mexican',
                     peaks=False,
                     valleys=False):
        """
        Determination of the time location of the intermittent
        structures according to the method defined in
        *M. Onorato et al., Phys. Rev. E 61, 1447 (2000)*

        Parameters
        ----------
        frequency : :obj: `float`
            Fourier frequency considered for the analysis
        wavelet : :obj: `string`
            Mother wavelet for the continuous wavelet analysis;
            possibilities are *Mexican* [default], *DOG1* or
            *Morlet*
        peaks : :obj: `Boolean`
            if set it computes the structure only for the peaks
            Default is False
        valleys : :obj: `Boolean`
            if set it computes the structure only for the valleys
            Default is False

        Returns
        -------
        maxima : :obj: `ndarray`
           A binary array equal to 1 at the identification of
           the structure (local maxima)
        allmax : :obj: `ndarray`
            A binary array equal to 1 in all the region where the
            signal is above the threshold

        Attributes
        ----------
        scale : :obj: `float`
            Corresponding scale for the chosen wavelet

        """
        if wavelet == 'Mexican':
            self.mother = wav.MexicanHat()
        elif wavelet == 'DOG1':
            self.mother = wav.DOG(m=1)
        elif wavelet == 'Morlet':
            self.mother = wav.Morlet()
        else:
            print('Not a valid wavelet using Mexican')
            self.mother = wav.MexicanHat()

        self.freq = frequency
        self.scale = 1. / self.mother.flambda() / self.freq

        # compute the continuous wavelet transform
        wt, sc, freqs, coi, fft, fftfreqs = wav.cwt(self.sig, self.dt, 0.25,
                                                    self.scale, 0, self.mother)
        wt = np.real(np.squeeze(wt))
        wtOr = wt.copy()
        # normalization
        wt = (wt - wt.mean()) / wt.std()
        self.lim = np.squeeze(np.abs(wt**2) / np.mean(wt**2))
        flatness = self.flatness
        newflat = flatness
        threshold = 20.
        while newflat >= 3.05 and threshold > 0:
            threshold -= 0.2
            d_ev = (self.lim > threshold)
            count = np.count_nonzero(d_ev)
            if 0 < count < self.lim.size:
                newflat = np.mean(wt[~d_ev] ** 4) / \
                          np.mean(wt[~d_ev] ** 2) ** 2

        # now that we have identified the threshold,
        # we need to find the maxima above the threshold
        maxima = np.zeros(self.sig.size)
        allmax = np.zeros(self.sig.size)
        allmax[(self.lim > threshold)] = 1
        imin = 0
        for i in range(1, maxima.size):
            if self.lim[i] >= threshold > self.lim[i - 1]:
                imin = i
            if self.lim[i] < threshold <= self.lim[i - 1]:
                imax = i - 1
                if imax == imin:
                    d = 0
                else:
                    d = self.lim[imin:imax].argmax()
                maxima[imin + d] = 1

        if peaks:
            ddPeak = ((maxima == 1) & (wtOr > 0))
            maxima[~ddPeak] = 0
        if valleys:
            ddPeak = ((maxima == 1) & (wtOr < 0))
            maxima[~ddPeak] = 0
        return maxima, allmax
Example #22
def parse_frames(image_file, sig=0.95):
    """
    
    """
    cap = cv2.VideoCapture(image_file)
    if verbose: print("Video successfully loaded")
    FRAME_COUNT = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
    FPS = cap.get(cv2.CAP_PROP_FPS)
    if verbose > 1:
        FRAME_HEIGHT = cap.get(cv2.CAP_PROP_FRAME_HEIGHT)
        FRAME_WIDTH = cap.get(cv2.CAP_PROP_FRAME_WIDTH)
        print(
            "INFO: \n Frame count: ",
            FRAME_COUNT,
            "\n",
            "FPS: ",
            FPS,
            " \n",
            "FRAME_HEIGHT: ",
            FRAME_HEIGHT,
            " \n",
            "FRAME_WIDTH: ",
            FRAME_WIDTH,
            " \n",
        )

    directory = os.getcwd(
    ) + '\\analysis\\{}_{}_{}_{}({})_{}_{}_scaled\\'.format(
        date, trial_type, name, wavelet, order, per_min, per_max)
    if not os.path.exists(directory):
        os.makedirs(directory)
    made = False
    frame_idx = 0
    idx = 0
    dropped = 0
    skip = True
    thresh = None

    df_wav = pd.DataFrame()
    df_auc = pd.DataFrame()
    df_for = pd.DataFrame()
    df_pow = pd.DataFrame()

    for i in range(FRAME_COUNT):
        a, img = cap.read()
        if a:
            frame_idx += 1

            if not made:
                #first we need to manually determine the boundaries and angle
                res = bg.manual_format(img)
                #print(res)
                x, y, w, h, angle = res
                horizon_begin = x
                horizon_end = x + w
                vert_begin = y
                vert_end = y + h
                #scale_array = np.zeros((FRAME_COUNT, abs(horizon_begin - horizon_end)))
                #area_time = np.zeros((FRAME_COUNT))
                #df[']
                print("Now Select the Red dot")
                red_res = bg.manual_format(img, stop_sign=True)
                red_x, red_y, red_w, red_h = red_res
                box_h_begin = red_x
                box_h_end = red_x + red_w
                box_v_begin = red_y
                box_v_end = red_y + red_h
                made = True
                #dims = (vert_begin, vert_end, horizon_begin, horizon_end)

            real_time = i / FPS
            rows, cols, chs = img.shape
            M = cv2.getRotationMatrix2D((cols / 2, rows / 2), angle, 1)
            rot_img = cv2.warpAffine(img, M, (cols, rows))
            roi = rot_img[vert_begin:vert_end, horizon_begin:horizon_end, :]

            red_box = img[box_v_begin:box_v_end, box_h_begin:box_h_end, 2]
            if thresh is None:
                thresh = np.mean(red_box)
            #print(np.mean(red_box))
            percent_drop = 1 - (np.mean(red_box) / thresh)
            print(percent_drop)
            if percent_drop >= 0.18:
                #cv2.imshow("Red Image", red_box)
                #cv2.waitKey(0)
                skip = False

            if skip:
                if verbose >= 1:
                    print('Frame is skipped {} / {}'.format(
                        frame_idx, FRAME_COUNT))
                continue

            if verbose >= 1:
                print('Processing frame {} / {}'.format(
                    frame_idx, FRAME_COUNT))

            idx += 1
            begin_code, data_line = extract_frame(roi)

            #We need to detrend the data before sending it away
            N = len(data_line)
            dt = su / N
            t = np.arange(0, N) * dt
            t = t - np.mean(t)

            var, std, dat_norm = detrend(data_line)
            ###################################################################
            if wavelet == 'DOG':
                mother = cwt.DOG(order)
            elif wavelet == 'Paul':
                mother = cwt.Paul(order)
            elif wavelet == 'Morlet':
                mother = cwt.Morlet(order)
            elif wavelet == 'MexicanHat':
                mother = cwt.MexicanHat()  # MexicanHat takes no order argument

            s0 = 4 * dt
            try:
                alpha, _, _ = cwt.ar1(dat_norm)
            except Exception:
                alpha = 0.95

            wave, scales, freqs, coi, fft, fftfreqs = cwt.cwt(
                dat_norm, dt, dj, s0, J, mother)

            iwave = cwt.icwt(
                wave, scales, dt, dj,
                mother) * std  #This is a reconstruction of the wave

            power = (np.abs(wave))**2  # This is the power spectrum
            fft_power = np.abs(fft)**2  # This is the Fourier power
            period = 1 / freqs  # Periods of the wavelet analysis, in cm
            # Rectify the power spectrum, an option suggested by Liu et al. (2007)
            power /= scales[:, None]

            # Next we calculate the significance of the power spectrum;
            # power is significant where power / sig95 > 1.
            signif, fft_theor = cwt.significance(1.0,
                                                 dt,
                                                 scales,
                                                 0,
                                                 alpha,
                                                 significance_level=0.95,
                                                 wavelet=mother)
            sig95 = np.ones([1, N]) * signif[:, None]
            sig95 = power / sig95

            #This is the significance of the global wave
            glbl_power = power.mean(axis=1)
            dof = N - scales  # Correction for padding at edges
            glbl_signif, tmp = cwt.significance(var,
                                                dt,
                                                scales,
                                                1,
                                                alpha,
                                                significance_level=0.95,
                                                dof=dof,
                                                wavelet=mother)

            sel = find((period >= per_min) & (period < per_max))
            Cdelta = mother.cdelta
            scale_avg = (scales * np.ones((N, 1))).transpose()
            scale_avg = power / scale_avg  # As in Torrence and Compo (1998) equation 24
            #scale_avg = var * dj * dt / Cdelta * scale_avg[sel, :].sum(axis=0)

            #scale_array[i,:] = scale_array[i,:]/np.max(scale_array[i,:])
            #data_array[i,:] = data_array[i,:]/np.max(data_array[i,:])

            scale_avg = var * dj * dt / Cdelta * scale_avg[sel, :].sum(axis=0)
            scale_avg_signif, tmp = cwt.significance(
                var,
                dt,
                scales,
                2,
                alpha,
                significance_level=0.95,
                dof=[scales[sel[0]], scales[sel[-1]]],
                wavelet=mother)
            Yticks = 2**np.arange(np.ceil(np.log2(period.min())),
                                  np.ceil(np.log2(period.max())))

            plt.close('all')
            plt.ioff()
            figprops = dict(figsize=(11, 8), dpi=72)
            fig = plt.figure(**figprops)

            wx = plt.axes([0.77, 0.75, 0.2, 0.2])
            imz = 0
            for idxy in range(0, len(period), 10):
                wx.plot(t, mother.psi(t / period[idxy]) + imz, linewidth=1.5)
                imz += 1
            wx.xaxis.set_ticklabels([])
            #wx.set_ylim([-10,10])
            # First sub-plot, the original time series anomaly and inverse wavelet
            # transform.
            ax = plt.axes([0.1, 0.75, 0.65, 0.2])
            ax.plot(t,
                    data_line - np.mean(data_line),
                    'k',
                    label="Original Data")
            ax.plot(t,
                    iwave,
                    '-',
                    linewidth=1,
                    color=[0.5, 0.5, 0.5],
                    label="Reconstructed wave")
            ax.plot(t,
                    dat_norm,
                    '--k',
                    linewidth=1.5,
                    color=[0.5, 0.5, 0.5],
                    label="Denoised Wave")
            ax.set_title(
                'a) {:10.2f} from beginning of trial.'.format(real_time))
            ax.set_ylabel(r'{} [{}]'.format("Amplitude", unit))
            ax.legend(loc=1)
            ax.set_ylim([-200, 200])
            #If the non-serrated section, bounds are 200 -
            # Second sub-plot, the normalized wavelet power spectrum and significance
            # level contour lines and cone of influence hatched area. Note that period
            # scale is logarithmic.
            bx = plt.axes([0.1, 0.37, 0.65, 0.28], sharex=ax)
            levels = [0.0625, 0.125, 0.25, 0.5, 1, 2, 4, 8, 16]
            cont = bx.contourf(t,
                               np.log2(period),
                               np.log2(power),
                               np.log2(levels),
                               extend='both',
                               cmap=plt.cm.viridis)
            extent = [t.min(), t.max(), 0, max(period)]
            bx.contour(t,
                       np.log2(period),
                       sig95, [-99, 1],
                       colors='k',
                       linewidths=2,
                       extent=extent)
            bx.fill(np.concatenate(
                [t, t[-1:] + dt, t[-1:] + dt, t[:1] - dt, t[:1] - dt]),
                    np.concatenate([
                        np.log2(coi), [1e-9],
                        np.log2(period[-1:]),
                        np.log2(period[-1:]), [1e-9]
                    ]),
                    'k',
                    alpha=0.3,
                    hatch='x')
            bx.set_title(
                'b) {} Octaves Wavelet Power Spectrum [{}({})]'.format(
                    octaves, mother.name, order))
            bx.set_ylabel('Period (cm)')
            #
            Yticks = 2**np.arange(np.ceil(np.log2(period.min())),
                                  np.ceil(np.log2(period.max())))
            bx.set_yticks(np.log2(Yticks))
            bx.set_yticklabels(Yticks)
            cbar = fig.colorbar(cont, ax=bx)
            # Third sub-plot, the global wavelet and Fourier power spectra and theoretical
            # noise spectra. Note that period scale is logarithmic.
            cx = plt.axes([0.77, 0.37, 0.2, 0.28], sharey=bx)
            cx.plot(glbl_signif, np.log2(period), 'k--')
            cx.plot(var * fft_theor, np.log2(period), '--', color='#cccccc')
            cx.plot(var * fft_power,
                    np.log2(1. / fftfreqs),
                    '-',
                    color='#cccccc',
                    linewidth=1.)
            cx.plot(var * glbl_power, np.log2(period), 'k-', linewidth=1.5)
            cx.set_title('c) Global Wavelet Spectrum')
            cx.set_xlabel(r'Power [({})^2]'.format(unit))
            #cx.set_xlim([0, (var*fft_theor).max()])
            plt.xscale('log')
            cx.set_ylim(np.log2([period.min(), period.max()]))
            cx.set_yticks(np.log2(Yticks))
            cx.set_yticklabels(Yticks)

            #if sig_array == []:
            yvals = np.linspace(Yticks.min(), Yticks.max(), len(period))

            plt.xscale('linear')
            plt.setp(cx.get_yticklabels(), visible=False)

            # Fourth sub-plot, the scale averaged wavelet spectrum.
            dx = plt.axes([0.1, 0.07, 0.65, 0.2], sharex=ax)
            dx.axhline(scale_avg_signif,
                       color='k',
                       linestyle='--',
                       linewidth=1.)
            dx.plot(t, scale_avg, 'k-', linewidth=1.5)
            dx.set_title('d) {}-{}cm scale-averaged power'.format(
                per_min, per_max))
            dx.set_xlabel('Distance from center(cm)')
            dx.set_ylabel(r'Average variance [{}]'.format(unit))
            #dx.set_ylim([0,500])
            ax.set_xlim([t.min(), t.max()])

            #plt.savefig(directory+'{}_analysis_frame-{}.png'.format(name, idx), bbox = 'tight')
            if verbose >= 2:
                print('*' * int((i / FRAME_COUNT) * 100))

            df_wav[real_time] = (pd.Series(dat_norm, index=t))
            df_pow[real_time] = (pd.Series(var * glbl_power,
                                           index=np.log2(period)))
            df_for[real_time] = (pd.Series(var * fft_power,
                                           index=np.log2(1. / fftfreqs)))
            df_auc[real_time] = [np.trapz(data_line)]

        else:
            print("Frame #{} has dropped".format(i))
            dropped += 1

    if verbose >= 1: print('All images saved')
    if verbose >= 1:
        print("{:10.2f} % of the frames have dropped".format(
            (dropped / FRAME_COUNT) * 100))

    # Plotting and saving the summary figures

    row, cols = df_pow.shape
    time = np.arange(0, cols) / FPS

    plt.close('all')
    plt.ioff()
    plt.contourf(time, df_pow.index.tolist(), df_pow)
    plt.contour(time, df_pow.index.tolist(), df_pow)
    plt.title("Global Power over Time")
    plt.ylabel("Period[cm]")
    plt.xlabel("Time")
    cax = plt.gca()
    #plt.xscale('log')
    cax.set_ylim(np.log2([period.min(), period.max()]))
    cax.set_yticks(np.log2(Yticks))
    cax.set_yticklabels(Yticks)

    plt.savefig(directory + '{}_global_power-{}.png'.format(name, idx),
                bbox='tight')

    row, cols = df_for.shape
    time = np.arange(0, cols) / FPS
    plt.close('all')
    plt.ioff()
    plt.contourf(time, df_for.index.tolist(), df_for)
    plt.contour(time, df_for.index.tolist(), df_for)
    plt.title("Fourier Power over Time")
    plt.ylabel("Period[cm]")
    plt.xlabel("Time")
    cax = plt.gca()
    #plt.xscale('log')
    cax.set_ylim(np.log2([period.min(), period.max()]))
    cax.set_yticks(np.log2(Yticks))
    cax.set_yticklabels(Yticks)
    plt.savefig(directory + '{}_fourier_power-{}.png'.format(name, idx),
                bbox='tight')

    plt.close('all')
    plt.ioff()
    rows, cols = df_auc.shape
    time = np.arange(0, cols) / FPS
    plt.plot(time, df_auc.T)
    plt.xlabel("Time")
    plt.ylabel("Area under the curve in cm")
    plt.title("Area under the curve over time")
    plt.savefig(directory + '{}_area_under_curve-{}.png'.format(name, idx),
                bbox='tight')

    df_wav['Mean'] = df_wav.mean(axis=1)
    df_pow['Mean'] = df_pow.mean(axis=1)
    df_for['Mean'] = df_for.mean(axis=1)
    df_auc['Mean'] = df_auc.mean(axis=1)

    df_wav['Standard Deviation'] = df_wav.std(axis=1)
    df_pow['Standard Deviation'] = df_pow.std(axis=1)
    df_for['Standard Deviation'] = df_for.std(axis=1)
    df_auc['Standard Deviation'] = df_auc.std(axis=1)

    ##[Writing analysis to excel]##############################################

    print("Writing files")
    writer = pd.ExcelWriter(directory + "analysis{}.xlsx".format(trial_name))
    df_wav.to_excel(writer, "Raw Waveforms")
    df_auc.to_excel(writer, "Area Under the Curve")
    df_for.to_excel(writer, "Fourier Spectra")
    df_pow.to_excel(writer, "Global Power Spectra")
    writer.save()

    ##[Writing means to a single file]#########################################

    #filename = 'C:\\pyscripts\\wavelet_analysis\\Overall_Analysis.xlsx'
    #append_data(filename, df_pow['Mean'].values,  str(trial_name), Yticks)
    ##[Plotting mean power and foruier]########################################
    plt.close('all')
    plt.ioff()
    plt.plot(df_pow['Mean'], df_pow.index.tolist(), label="Global Power")
    plt.plot(df_for['Mean'], df_for.index.tolist(), label="Fourier Power")
    plt.title("Global Power averaged over Time")
    plt.ylabel("Period[cm]")
    plt.xlabel("Power[cm^2]")
    cax = plt.gca()
    #plt.xscale('log')
    cax.set_ylim(np.log2([period.min(), period.max()]))
    cax.set_yticks(np.log2(Yticks))
    cax.set_yticklabels(Yticks)
    plt.legend()
    plt.savefig(directory + '{}_both_{}.png'.format(name, idx), bbox='tight')

    plt.close('all')
    plt.ioff()
    plt.plot(df_pow['Mean'], df_pow.index.tolist(), label="Global Power")
    plt.title("Global Power averaged over Time")
    plt.ylabel("Period[cm]")
    plt.xlabel("Power[cm^2]")
    cax = plt.gca()
    #plt.xscale('log')
    cax.set_ylim(np.log2([period.min(), period.max()]))
    cax.set_yticks(np.log2(Yticks))
    cax.set_yticklabels(Yticks)
    plt.legend()
    plt.savefig(directory + '{}_global_power_{}.png'.format(name, idx),
                bbox='tight')

    plt.close('all')
    plt.ioff()
    plt.plot(df_for['Mean'], df_for.index.tolist(), label="Fourier Power")
    plt.title("Fourier averaged over Time")
    plt.ylabel("Period[cm]")
    plt.xlabel("Power[cm^2]")
    cax = plt.gca()
    #plt.xscale('log')
    cax.set_ylim(np.log2([period.min(), period.max()]))
    cax.set_yticks(np.log2(Yticks))
    cax.set_yticklabels(Yticks)
    plt.legend()
    plt.savefig(directory + '{}_fourier_{}.png'.format(name, idx),
                bbox='tight')

    cap.release()
    return directory
Example #23
    def cwt(signal, t, obspy=None):
        # from __future__ import division
        import numpy
        from matplotlib import pyplot

        import pycwt as wavelet
        from pycwt.helpers import find
        signal = signal[10000:11000]
        t = t[10000:11000]
        url = 'http://paos.colorado.edu/research/wavelets/wave_idl/nino3sst.txt'
        dat = numpy.genfromtxt(url, skip_header=19)
        title = 'DICARDIA'
        label = 'DICARDIA SST'
        units = 'degC'
        t0 = 1871.0
        dt = 0.25  # In years

        N = signal.shape[0]
        print(N)
        p = numpy.polyfit(t, signal, 1)
        dat_notrend = signal - numpy.polyval(p, t)
        std = dat_notrend.std()  # Standard deviation
        var = std**2  # Variance
        dat_norm = dat_notrend / std  # Normalized dataset

        mother = wavelet.Morlet(6)
        s0 = 2 * dt  # Starting scale, in this case 2 * 0.25 years = 6 months
        dj = 1 / 12  # Twelve sub-octaves per octaves
        J = 7 / dj  # Seven powers of two with dj sub-octaves
        alpha, _, _ = wavelet.ar1(
            signal)  # Lag-1 autocorrelation for red noise

        wave, scales, freqs, coi, fft, fftfreqs = wavelet.cwt(
            dat_norm, dt, dj, s0, J, mother)
        iwave = wavelet.icwt(wave, scales, dt, dj, mother) * std

        power = (numpy.abs(wave))**2
        fft_power = numpy.abs(fft)**2
        period = 1 / freqs

        power /= scales[:, None]

        signif, fft_theor = wavelet.significance(1.0,
                                                 dt,
                                                 scales,
                                                 0,
                                                 alpha,
                                                 significance_level=0.95,
                                                 wavelet=mother)
        sig95 = numpy.ones([1, N]) * signif[:, None]
        sig95 = power / sig95

        glbl_power = power.mean(axis=1)
        dof = N - scales  # Correction for padding at edges
        glbl_signif, tmp = wavelet.significance(var,
                                                dt,
                                                scales,
                                                1,
                                                alpha,
                                                significance_level=0.95,
                                                dof=dof,
                                                wavelet=mother)
        sel = find((period >= 2) & (period < 8))
        Cdelta = mother.cdelta
        scale_avg = (scales * numpy.ones((N, 1))).transpose()
        scale_avg = power / scale_avg  # As in Torrence and Compo (1998) equation 24
        scale_avg = var * dj * dt / Cdelta * scale_avg[sel, :].sum(axis=0)
        scale_avg_signif, tmp = wavelet.significance(
            var,
            dt,
            scales,
            2,
            alpha,
            significance_level=0.95,
            dof=[scales[sel[0]], scales[sel[-1]]],
            wavelet=mother)
        # Prepare the figure
        pyplot.close('all')
        pyplot.ioff()
        figprops = dict(figsize=(11, 8), dpi=72)
        fig = pyplot.figure(**figprops)

        # First sub-plot, the original time series anomaly and inverse wavelet
        # transform.
        ax = pyplot.axes([0.1, 0.75, 0.65, 0.2])
        ax.plot(t, iwave, '-', linewidth=1, color=[0.5, 0.5, 0.5])
        ax.plot(t, signal, 'k', linewidth=1.5)
        ax.set_title('a) {}'.format(title))
        ax.set_ylabel(r'{} [{}]'.format(label, units))

        # Second sub-plot, the normalized wavelet power spectrum and significance
        # level contour lines and cone of influence hatched area. Note that period
        # scale is logarithmic.
        bx = pyplot.axes([0.1, 0.37, 0.65, 0.28], sharex=ax)
        levels = [0.0625, 0.125, 0.25, 0.5, 1, 2, 4, 8, 16]
        bx.contourf(t,
                    numpy.log2(period),
                    numpy.log2(power),
                    numpy.log2(levels),
                    extend='both',
                    cmap=pyplot.cm.viridis)
        extent = [t.min(), t.max(), 0, max(period)]
        bx.contour(t,
                   numpy.log2(period),
                   sig95, [-99, 1],
                   colors='k',
                   linewidths=2,
                   extent=extent)
        bx.fill(numpy.concatenate(
            [t, t[-1:] + dt, t[-1:] + dt, t[:1] - dt, t[:1] - dt]),
                numpy.concatenate([
                    numpy.log2(coi), [1e-9],
                    numpy.log2(period[-1:]),
                    numpy.log2(period[-1:]), [1e-9]
                ]),
                'k',
                alpha=0.3,
                hatch='x')
        bx.set_title('b) {} Wavelet Power Spectrum ({})'.format(
            label, mother.name))
        bx.set_ylabel('Period (years)')
        #
        Yticks = 2**numpy.arange(numpy.ceil(numpy.log2(period.min())),
                                 numpy.ceil(numpy.log2(period.max())))
        bx.set_yticks(numpy.log2(Yticks))
        bx.set_yticklabels(Yticks)

        # Third sub-plot, the global wavelet and Fourier power spectra and theoretical
        # noise spectra. Note that period scale is logarithmic.
        cx = pyplot.axes([0.77, 0.37, 0.2, 0.28], sharey=bx)
        cx.plot(glbl_signif, numpy.log2(period), 'k--')
        cx.plot(var * fft_theor, numpy.log2(period), '--', color='#cccccc')
        cx.plot(var * fft_power,
                numpy.log2(1. / fftfreqs),
                '-',
                color='#cccccc',
                linewidth=1.)
        cx.plot(var * glbl_power, numpy.log2(period), 'k-', linewidth=1.5)
        cx.set_title('c) Global Wavelet Spectrum')
        cx.set_xlabel(r'Power [({})^2]'.format(units))
        cx.set_xlim([0, glbl_power.max() + var])
        cx.set_ylim(numpy.log2([period.min(), period.max()]))
        cx.set_yticks(numpy.log2(Yticks))
        cx.set_yticklabels(Yticks)
        pyplot.setp(cx.get_yticklabels(), visible=False)

        # Fourth sub-plot, the scale averaged wavelet spectrum.
        dx = pyplot.axes([0.1, 0.07, 0.65, 0.2], sharex=ax)
        dx.axhline(scale_avg_signif, color='k', linestyle='--', linewidth=1.)
        dx.plot(t, scale_avg, 'k-', linewidth=1.5)
        dx.set_title('d) {}--{} year scale-averaged power'.format(2, 8))
        dx.set_xlabel('Time (year)')
        dx.set_ylabel(r'Average variance [{}]'.format(units))
        ax.set_xlim([t.min(), t.max()])

        pyplot.show()
Example #24
def wavelet_analysis(z,
                     tm,
                     lon=None,
                     lat=None,
                     mother='Morlet',
                     alpha=0.0,
                     siglvl=0.95,
                     loc=None,
                     onlyloc=False,
                     periods=None,
                     sel_periods=[],
                     show=False,
                     save='',
                     dsave='',
                     prefix='',
                     labels=dict(),
                     title=None,
                     name=None,
                     fpath='',
                     fpattern='',
                     std=dict(),
                     crange=None,
                     levels=None,
                     cmap=cm.GMT_no_green,
                     debug=False):
    """Continuous wavelet transform and significance analysis.

    The analysis is made using the methodology and statistical approach
    suggested by Torrence and Compo (1998).

    Depending on the dimensions of the input array, three different
    kinds of approaches are taken. If the input array is one-dimensional
    then only a simple analysis is performed. If the array is
    two- or three-dimensional then spectral Hovmoller diagrams are drawn
    for each Fourier period given within a range of +/-25%.

    PARAMETERS
        z (array like) :
            Input data. The data array should have one of these forms,
            z[tm], z[tm, lat] or z[tm, lat, lon].
        tm (array like) :
            Time axis. It should contain values in matplotlib date
            format (i.e. number of days since 0001-01-01 UTC).
        lon (array like, optional) :
            Longitude.
        lat (array like, optional) :
            Latitude.
        mother (string, optional) :
            Gives the name of the mother wavelet to be used. Possible
            values are 'Morlet' (default), 'Paul' or 'Mexican hat'.
        alpha (float or dictionary, optional) :
            Lag-1 autocorrelation for background noise.  Default value
            is 0.0 (white noise). If different autocorrelation
            coefficients should be used for different locations, then
            the input should contain a dictionary with 'lon', 'lat',
            'map' keys as for the std parameter.
        siglvl (float, optional) :
            Significance level. Default value is 0.95.
        loc (array like, optional) :
            Special locations of interest. If the input array is of
            higher dimensions, the output of the simple wavelet
            analysis for each of the locations is given. The list
            should contain the (lon, lat) pairs for each location
            of interest.
        onlyloc (boolean, optional) :
            If set to true then only the specified locations are
            analysed. The default is false.
        periods (array like, optional) :
            Special Fourier periods of interest in case of analysis of
            higher dimensions (in years).
        sel_periods (array like, optional) :
            Selects the Fourier periods over which spectral power is
            averaged.
        show (boolean, optional) :
            If set to true then the resulting maps are shown on screen.
        save (string, optional) :
            The path in which the resulting plots are to be saved. If
            not set, then no images will be saved.
        dsave (string, optional) :
            If set, saves the scale averaged power spectrum series to
            this path. This is especially useful if memory is an issue.
        prefix (string, optional) :
            Prefix to retain naming conventions such as basin.
        labels (dictionary, optional) :
            Sets the labels for the plot axis.
        title (string, array like, optional) :
            Title of each of the selected periods.
        name (string, array like, optional) :
            Name of each of the selected periods. Used when saving the
            results to files.
        fpath (string, optional) :
            Path for the source files to be loaded when memory issues
            are a concern.
        fpattern (string, optional) :
            Regular expression pattern to match file names.
        std (dictionary, optional) :
            A dictionary containing a map of the standard deviation of
            the analysed time series. To set the longitude and latitude
            coordinates of the map, they should be included as
            separate 'lon' and 'lat' key items. If they are omitted,
            then the regular input parameters are assumed. Accepted
            standard deviation error is set in key 'err' (default value
            is 1e-2).
        crange (array like, optional) :
            Array of power levels to be used in average Hovmoller colour bar.
        levels (array like, optional) :
            Array of power levels to be used in spectrogram colour bar.
        cmap (colormap, optional) :
            Sets the colour map to be used in the plots. The default is
            the Generic Mapping Tools (GMT) no green.
        debug (boolean, optional) :
            If set to True then warnings are shown.

    OUTPUT
        If show or save are set, plots are displayed on screen and/or
        saved to file according to the specified parameters.

        If dsave parameter is set, also saves the scale averaged power
        series to files.

    RETURNS
        wave (dictionary) :
            Dictionary containing the resulting calculations from the
            wavelet analysis according to the input parameters. The
            output items might be:
                scale --
                    Wavelet scales.
                period --
                    Equivalent Fourier periods (in days).
                power_spectrum --
                    Wavelet power spectrum (in units**2).
                power_significance --
                    Relative significance of the power spectrum.
                global_power --
                    Global wavelet power spectrum (in units**2).
                scale_spectrum  --
                    Scale averaged wavelet spectra (in units**2)
                    according to selected periods.
                scale_significance --
                    Relative significance of the scale averaged wavelet
                    spectra.
                fft --
                    Fourier spectrum.
                fft_first --
                    Fourier spectrum of the first half of the
                    time-series.
                fft_second --
                    Fourier spectrum of the second half of the
                    time-series.
                fft_period --
                    Fourier periods (in days).
                trend --
                    Signal trend (in units/yr).
                wavelet_trend --
                    Wavelet spectrum trends (in units**2/yr).

    """
    t1 = time()
    result = {}

    # Resetting unit labels for Hovmoller plots
    hlabels = dict(labels)
    hlabels['units'] = ''

    # Setting some titles and paths
    if name is None:
        name = title

    # Working with the std parameter and setting its properties:
    if 'val' in std.keys():
        if 'lon' not in std.keys():
            std['lon'] = lon
        std['lon180'] = common.lon180(std['lon'])
        if 'lat' not in std.keys():
            std['lat'] = lat
        if 'err' not in std.keys():
            std['err'] = 1e-2
        std['map'] = True
    else:
        std['map'] = False

    # Lag-1 autocorrelation parameter
    if type(alpha).__name__ == 'dict':
        if 'lon' not in alpha.keys():
            alpha['lon'] = lon
        alpha['lon180'] = common.lon180(alpha['lon'])
        if 'lat' not in alpha.keys():
            alpha['lat'] = lat
        alpha['mean'] = alpha['val'].mean()
        alpha['map'] = True
        alpha['calc'] = False
    else:
        if alpha == -1:
            alpha = {'mean': -1, 'calc': True}
        else:
            alpha = {'val': alpha, 'mean': alpha, 'map': False, 'calc': False}

    # Shows some of the options on screen.
    print('Average Lag-1 autocorrelation for background noise: %.2f' %
          (alpha['mean']))
    if save:
        print('Saving result figures in \'%s\'.' % save)
    if dsave:
        print('Saving result data in \'%s\'.' % dsave)

    if fpath:
        # Gets the list of files to be loaded, extracts all the
        # latitudes and loads the first file to get the main parameters.
        flist = os.listdir(fpath)
        flist, match = common.reglist(flist, fpattern)
        if len(flist) == 0:
            raise Warning('No files matched search pattern.')
        flist = numpy.asarray(flist)
        lst_lat = []
        for item in match:
            y = float(item[-2])
            if item[-1].upper() == 'S': y *= -1
            lst_lat.append(y)
        # Detect file type from file name
        ftype = fm.detect_ftype(flist[0])
        x, y, tm, z = fm.load_map('%s/%s' % (fpath, flist[0]),
                                  ftype=ftype,
                                  masked=True)
        if lon is None:
            lon = x
        lat = numpy.unique(lst_lat)
        dim = 2
    else:
        # Transforms input arrays in numpy arrays and numpy masked arrays.
        tm = numpy.asarray(tm)
        z = numpy.ma.asarray(z)
        z.mask = numpy.isnan(z)

        # Determines the number of dimensions of the variable to be plotted and
        # the sizes of each dimension.
        a = b = c = None
        dim = len(z.shape)
        if dim == 3:
            c, b, a = z.shape
        elif dim == 2:
            c, a = z.shape
            b = 1
            z = z.reshape(c, b, a)
        else:
            c = z.shape[0]
            a = b = 1
            z = z.reshape(c, b, a)
        if tm.size != c:
            raise Warning('Time and data lengths do not match.')

    # Transforms coordinate arrays into numpy arrays
    s = type(lat).__name__
    if s in ['int', 'float', 'float64']:
        lat = numpy.asarray([lat])
    elif s != 'NoneType':
        lat = numpy.asarray(lat)
    s = type(lon).__name__
    if s in ['int', 'float', 'float64']:
        lon = numpy.asarray([lon])
    elif s != 'NoneType':
        lon = numpy.asarray(lon)

    # Starts the mother wavelet class instance and determines important
    # analysis parameters
    mother = mother.lower()
    if mother == 'morlet':
        mother = wavelet.Morlet()
    elif mother == 'paul':
        mother = wavelet.Paul()
    elif mother in ['mexican hat', 'mexicanhat', 'mexican_hat']:
        mother = wavelet.Mexican_hat()
    else:
        raise Warning('Mother wavelet unknown.')

    t = tm / common.daysinyear  # Time array in years
    dt = tm[1] - tm[0]  # Temporal sampling interval
    try:  # Zonal sampling interval
        dx = lon[1] - lon[0]
    except:
        dx = 1
    try:  # Meridional sampling interval
        dy = lat[1] - lat[0]
    except:
        dy = dx
    if numpy.isnan(dt): dt = 1
    if numpy.isnan(dx): dx = 1
    if numpy.isnan(dy): dy = dx
    dj = 0.25  # Four sub-octaves per octave
    s0 = 2 * dt  # Smallest scale
    J = int(7 / dj) - 1  # Seven powers of two with dj sub-octaves
    scales = period = None

    if type(crange).__name__ == 'NoneType':
        crange = numpy.arange(0, 1.1, 0.1)
    if type(levels).__name__ == 'NoneType':
        levels = 2.**numpy.arange(-3, 6)

    if fpath:
        N = lat.size
        # TODO: refactoring # lon = numpy.arange(-81. - dx / 2., 290. + dx / 2, dx)
        # TODO: refactoring # lat = numpy.unique(numpy.asarray(lst_lat))
        c, b, a = tm.size, lat.size, lon.size
    else:
        N = a * b

    # Making sure that the longitudes range from -180 to 180 degrees and
    # setting the squared search radius R2.
    try:
        lon180 = common.lon180(lon)
    except:
        lon180 = None
    R2 = dx**2 + dy**2
    if numpy.isnan(R2):
        R2 = 65535.
    if loc is not None:
        loc = numpy.asarray([[common.lon180(item[0]), item[1]]
                             for item in loc])

    # Initializes important result variables such as the global wavelet power
    # spectrum map, scale averaged spectrum time-series and their significance,
    # wavelet power trend map.
    global_power = numpy.ma.empty([J + 1, b, a]) * numpy.nan
    try:
        C = len(periods) + 1
        dT = numpy.diff(periods)
        pmin = numpy.concatenate([[periods[0] - dT[0] / 2],
                                  0.5 * (periods[:-1] + periods[1:])])
        pmax = numpy.concatenate(
            [0.5 * (periods[:-1] + periods[1:]), [periods[-1] + dT[-1] / 2]])
    except:
        # Sets the lowest period to null and the highest to half the time
        # series length.
        C = 1
        pmin = numpy.array([0])
        pmax = numpy.array([(tm[-1] - tm[0]) / 2])
    if type(sel_periods).__name__ in ['int', 'float']:
        sel_periods = [sel_periods]
    elif len(sel_periods) == 0:
        sel_periods = [-1.]
    try:
        if fpath:
            raise Warning('Process files individually')
        avg_spectrum = numpy.ma.empty([C, c, b, a]) * numpy.nan
        mem_error = False
    except:
        avg_spectrum = numpy.ma.empty([C, c, a]) * numpy.nan
        mem_error = True
    avg_spectrum_signif = numpy.ma.empty([C, b, a]) * numpy.nan
    trend = numpy.ma.empty([b, a]) * numpy.nan
    wavelet_trend = numpy.ma.empty([C, b, a]) * numpy.nan
    fft_trend = numpy.ma.empty([C, b, a]) * numpy.nan
    std_map = numpy.ma.empty([b, a]) * numpy.nan
    zero = numpy.ma.empty([c, a])
    fft_spectrum = None
    fft_spectrum1 = None
    fft_spectrum2 = None

    # Walks through each latitude and then through each longitude to perform
    # the temporal wavelet analysis.
    if N == 1:
        plural = ''
    else:
        plural = 's'
    s = 'Spectral analysis of %d location%s... ' % (N, plural)
    stdout.write(s)
    stdout.flush()
    for j in range(b):
        t2 = time()
        isloc = False  # Resets 'is special location' flag
        hloc = []  # Cleans location list for Hovmoller plots
        zero *= numpy.nan
        if mem_error:
            # Clears average spectrum for next step.
            avg_spectrum *= numpy.nan
            avg_spectrum.mask = False
        if fpath:
            findex = numpy.flatnonzero(numpy.asarray(lst_lat) == lat[j])
            if len(findex) == 0:
                continue
            ftype = fm.detect_ftype(flist[findex[0]])
            try:
                x, y, tm, z = fm.load_dataset(fpath,
                                              flist=flist[findex],
                                              ftype=ftype,
                                              masked=True,
                                              lon=lon,
                                              lat=lat[j:j + 1],
                                              verbose=True)
            except:
                continue
            z = z[:, 0, :]
            x180 = common.lon180(x)

        # Determines the first and second halves of the time-series and some
        # constants for the FFT
        fft_ta = numpy.ceil(t.min())
        fft_tb = numpy.floor(t.max())
        fft_tc = numpy.round((fft_ta + fft_tb) / 2)
        fft_ia = numpy.flatnonzero((t >= fft_ta) & (t <= fft_tc))
        fft_ib = numpy.flatnonzero((t >= fft_tc) & (t <= fft_tb))
        fft_N = int(2**numpy.ceil(numpy.log2(max([len(fft_ia), len(fft_ib)]))))
        fft_N2 = fft_N // 2 - 1
        fft_dt = t[fft_ib].mean() - t[fft_ia].mean()

        for i in range(a):
            # Some string output.
            try:
                Y, X = common.num2latlon(lon[i],
                                         lat[j],
                                         mode='each',
                                         padding=False)
            except:
                Y = X = '?'

            # Extracts individual time-series from the whole dataset and
            # sets or calculates its standard deviation, squared standard
            # deviation and finally the normalized time-series.
            if fpath:
                try:
                    ilon = numpy.flatnonzero(x == lon[i])[0]
                    fz = z[:, ilon]
                except:
                    continue
            else:
                fz = z[:, j, i]
            if fz.mask.all():
                continue
            if std['map']:
                try:
                    u = numpy.flatnonzero(std['lon180'] == lon180[i])[0]
                    v = numpy.flatnonzero(std['lat'] == lat[j])[0]
                except:
                    if debug:
                        warnings.warn(
                            'Unable to locate standard deviation '
                            'for (%s, %s)' % (X, Y), Warning)
                    continue
                fstd = std['val'][v, u]
                estd = fstd - fz.std()
                if (estd < 0) & (abs(estd) > std['err']):
                    if debug:
                        warnings.warn('Discrepant input standard deviation '
                                      '(%f) location (%.3f, %.3f) will be '
                                      'disregarded.' %
                                      (estd, lon180[i], lat[j]))
                    continue
            else:
                fstd = fz.std()
            fstd2 = fstd**2
            std_map[j, i] = fstd
            zero[:, i] = fz
            fz = (fz - fz.mean()) / fstd

            # Calculates the distance of the current point to any special
            # location set in the 'loc' parameter. If only special locations
            # are to be analysed, then skips all other ones. If the input
            # array is one dimensional, then do the analysis anyway.
            if dim == 1:
                dist = numpy.asarray([0.])
            else:
                try:
                    dist = numpy.asarray([
                        ((item[0] - (lon180[i]))**2 + (item[1] - lat[j])**2)
                        for item in loc
                    ])
                except:
                    dist = []
            if (dist > R2).all() and (loc != 'all') and onlyloc:
                continue

            # Determines the lag-1 autocorrelation coefficient to be used in
            # the significance test from the input parameter
            if alpha['calc']:
                ac = acorr(fz)
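                # Torrence and Compo (1998) estimate the red-noise parameter
                # as alpha = (alpha1 + sqrt(alpha2)) / 2, from the lag-1 and
                # lag-2 autocorrelation coefficients.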
                alpha_ij = (ac[c + 1] + ac[c + 2]**0.5) / 2
            elif alpha['map']:
                try:
                    u = numpy.flatnonzero(alpha['lon180'] == lon180[i])[0]
                    v = numpy.flatnonzero(alpha['lat'] == lat[j])[0]
                    alpha_ij = alpha['val'][v, u]
                except:
                    if debug:
                        warnings.warn(
                            'Unable to locate standard deviation '
                            'for (%s, %s) using mean value instead' % (X, Y),
                            Warning)
                    alpha_ij = alpha['mean']
            else:
                alpha_ij = alpha['mean']

            # Calculates the continuous wavelet transform using the wavelet
            # Python module. Calculates the wavelet and Fourier power spectrum
            # and the periods in days. Also calculates the Fourier power
            # spectrum for the first and second halves of the timeseries.
            wave, scales, freqs, coi, fft, fftfreqs = wavelet.cwt(
                fz, dt, dj, s0, J, mother)
            power = abs(wave * wave.conj())
            fft_power = abs(fft * fft.conj())
            period = 1. / freqs
            fftperiod = 1. / fftfreqs
            psel = numpy.flatnonzero(period <= pmax.max())

            # Calculates the Fourier transform for the first and the second
            # halves of the time-series for later trend analysis.
            fft_1 = numpy.fft.fft(fz[fft_ia], fft_N)[1:fft_N // 2] / fft_N**0.5
            fft_2 = numpy.fft.fft(fz[fft_ib], fft_N)[1:fft_N // 2] / fft_N**0.5
            fft_p1 = abs(fft_1 * fft_1.conj())
            fft_p2 = abs(fft_2 * fft_2.conj())

            # Creates FFT return array and stores the spectrum accordingly
            try:
                fft_spectrum[:, j, i] = fft_power * fstd2
                fft_spectrum1[:, j, i] = fft_p1 * fstd2
                fft_spectrum2[:, j, i] = fft_p2 * fstd2
            except:
                fft_spectrum = (numpy.ma.empty([len(fft_power), b, a]) *
                                numpy.nan)
                fft_spectrum1 = (numpy.ma.empty([fft_N2, b, a]) * numpy.nan)
                fft_spectrum2 = (numpy.ma.empty([fft_N2, b, a]) * numpy.nan)
                #
                fft_spectrum[:, j, i] = fft_power * fstd2
                fft_spectrum1[:, j, i] = fft_p1 * fstd2
                fft_spectrum2[:, j, i] = fft_p2 * fstd2

            # Performs the significance test according to the article by
            # Torrence and Compo (1998). The wavelet power is significant
            # if the ratio power/sig95 is > 1.
            signif, fft_theor = wavelet.significance(1.,
                                                     dt,
                                                     scales,
                                                     0,
                                                     alpha_ij,
                                                     significance_level=siglvl,
                                                     wavelet=mother)
            sig95 = (signif * numpy.ones((c, 1))).transpose()
            sig95 = power / sig95

            # Calculates the global wavelet power spectrum and its
            # significance. The global wavelet spectrum is the average of the
            # wavelet power spectrum over time. The degrees of freedom (dof)
            # have to be corrected for padding at the edges.
            glbl_power = power.mean(axis=1)
            dof = c - scales
            glbl_signif, tmp = wavelet.significance(1.,
                                                    dt,
                                                    scales,
                                                    1,
                                                    alpha_ij,
                                                    significance_level=siglvl,
                                                    dof=dof,
                                                    wavelet=mother)
            global_power[:, j, i] = glbl_power * fstd2

            # Calculates the average wavelet spectrum along the scales and its
            # significance according to Torrence and Compo (1998) eq. 24. The
            # scale_avg_full variable is used multiple times according to the
            # selected periods range.
            #
            # Also calculates the average Fourier power spectrum.
            Cdelta = mother.cdelta
            scale_avg_full = (scales * numpy.ones((c, 1))).transpose()
            scale_avg_full = power / scale_avg_full
            for k in range(C):
                if k == 0:
                    sel = numpy.flatnonzero((period >= pmin[0])
                                            & (period <= pmax[-1]))
                    pminmax = [period[sel[0]], period[sel[-1]]]
                    les = numpy.flatnonzero((fftperiod >= pmin[0])
                                            & (fftperiod <= pmax[-1]))
                    fminmax = [fftperiod[les[0]], fftperiod[les[-1]]]
                else:
                    sel = numpy.flatnonzero((period >= pmin[k - 1])
                                            & (period < pmax[k - 1]))
                    pminmax = [pmin[k - 1], pmax[k - 1]]
                    les = numpy.flatnonzero((fftperiod >= pmin[k - 1])
                                            & (fftperiod <= pmax[k - 1]))
                    fminmax = [fftperiod[les[0]], fftperiod[les[-1]]]

                scale_avg = numpy.ma.array(
                    (dj * dt / Cdelta * scale_avg_full[sel, :].sum(axis=0)))
                scale_avg_signif, tmp = wavelet.significance(
                    1.,
                    dt,
                    scales,
                    2,
                    alpha_ij,
                    significance_level=siglvl,
                    dof=[scales[sel[0]], scales[sel[-1]]],
                    wavelet=mother)
                scale_avg.mask = (scale_avg < scale_avg_signif)
                if mem_error:
                    avg_spectrum[k, :, i] = scale_avg
                else:
                    avg_spectrum[k, :, j, i] = scale_avg
                avg_spectrum_signif[k, j, i] = scale_avg_signif

                # Trend analysis using least square polynomial fit of one
                # degree of the original input data and scale averaged
                # wavelet power. The wavelet power trend is calculated only
                # where the cone of influence spans the highest analyzed
                # period. In the end, the returned value for the trend is in
                # units**2.
                #
                # Also calculates the trends in the Fourier power spectrum.
                # Note that the FFT power spectrum is already multiplied by
                # the signal's standard deviation.
                incoi = numpy.flatnonzero(coi >= pmax[-1])
                if len(incoi) == 0:
                    incoi = numpy.arange(c)
                polyw = numpy.polyfit(t[incoi], scale_avg[incoi].data, 1)
                wavelet_trend[k, j, i] = polyw[0] * fstd2
                fft_trend[k, j, i] = (
                    fft_spectrum2[les[les < fft_N2], j, i] -
                    fft_spectrum1[les[les < fft_N2], j, i]).mean() / fft_dt
                if k == 0:
                    polyz = numpy.polyfit(t, fz * fstd, 1)
                    trend[j, i] = polyz[0]

                # Plots the wavelet analysis results for the individual
                # series. The plot is only generated if the dimension of the
                # input variable z is one, if a special location is within a
                # range of the search radius R and if the show or save
                # parameters are set.
                if (show or (save != '')) and (k in sel_periods):
                    if (dist < R2).any() or (loc == 'all') or (dim == 1):
                        # There is an interesting spot within the search
                        # radius of location (%s, %s).' % (Y, X)
                        isloc = True
                        if (dist < R2).any():
                            try:
                                hloc.append(loc[(dist < R2)][0, 0])
                            except:
                                pass
                        if save:
                            try:
                                sv = '%s/tz_%s_%s_%d' % (
                                    save, prefix,
                                    common.num2latlon(lon[i], lat[j]), k)
                            except:
                                sv = '%s' % (save)
                        else:
                            sv = ''
                        graphics.wavelet_plot(tm,
                                              period[psel],
                                              fz,
                                              power[psel, :],
                                              coi,
                                              glbl_power[psel],
                                              scale_avg.data,
                                              fft=fft,
                                              fft_period=fftperiod,
                                              power_signif=sig95[psel, :],
                                              glbl_signif=glbl_signif[psel],
                                              scale_signif=scale_avg_signif,
                                              pminmax=pminmax,
                                              labels=labels,
                                              normalized=True,
                                              std=fstd,
                                              ztrend=polyz,
                                              wtrend=polyw,
                                              show=show,
                                              save=sv,
                                              levels=levels,
                                              cmap=cmap)

        # Saves and/or plots the intermediate results as zonal temporal
        # diagrams.
        if dsave:
            for k in range(C):
                if k == 0:
                    sv = '%s/%s/%s_%s.xt.gz' % (
                        dsave, 'global', prefix,
                        common.num2latlon(lon[i], lat[j], mode='each')[0])
                else:
                    sv = '%s/%s/%s_%s.xt.gz' % (
                        dsave, name[k - 1].lower(), prefix,
                        common.num2latlon(lon[i], lat[j], mode='each')[0])
                if mem_error:
                    fm.save_map(lon, tm, avg_spectrum[k, :, :].data, sv,
                                lat[j])
                else:
                    fm.save_map(lon, tm, avg_spectrum[k, :, j, :].data, sv,
                                lat[j])

        if ((dim > 1) and (show or (save != '')) and (not onlyloc)
                and len(hloc) > 0):
            hloc = common.lon360(numpy.unique(hloc))
            if save:
                sv = '%s/xt_%s_%s' % (save, prefix,
                                      common.num2latlon(
                                          lon[i], lat[j], mode='each')[0])
            else:
                sv = ''
            if mem_error:
                # To include overlapping original signal, use zz=zero
                gis.hovmoller(lon,
                              tm,
                              avg_spectrum[1:, :, :],
                              zo=avg_spectrum_signif[1:, j, :],
                              title=title,
                              crange=crange,
                              show=show,
                              save=sv,
                              labels=hlabels,
                              loc=hloc,
                              cmap=cmap,
                              bottom='avg',
                              right='avg',
                              std=std_map[j, :])
            else:
                gis.hovmoller(lon,
                              tm,
                              avg_spectrum[1:, :, j, :],
                              zo=avg_spectrum_signif[1:, j, :],
                              title=title,
                              crange=crange,
                              show=show,
                              save=sv,
                              labels=hlabels,
                              loc=hloc,
                              cmap=cmap,
                              bottom='avg',
                              right='avg',
                              std=std_map[j, :])

        # Flushing profiling text.
        stdout.write(len(s) * '\b')
        s = 'Spectral analysis of %d location%s (%s)... %s ' % (
            N, plural, Y, common.profiler(b, j + 1, 0, t1, t2))
        stdout.write(s)
        stdout.flush()

    stdout.write('\n')

    result['scale'] = scales
    result['period'] = period
    if dim == 1:
        result['power_spectrum'] = power * fstd2
        result['power_significance'] = sig95
        result['cwt'] = wave
        result['fft'] = fft
    result['global_power'] = global_power
    result['scale_spectrum'] = avg_spectrum
    if fpath:
        result['lon'] = lon
        result['lat'] = lat
    result['scale_significance'] = avg_spectrum_signif
    result['trend'] = trend
    result['wavelet_trend'] = wavelet_trend
    result['fft_power'] = fft_spectrum
    result['fft_first'] = fft_spectrum1
    result['fft_second'] = fft_spectrum2
    result['fft_period'] = fftperiod
    result['fft_trend'] = fft_trend
    return result
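
# A minimal usage sketch (added for illustration; not part of the original
# example). It assumes the module-level imports this function relies on
# (numpy, the pycwt-style `wavelet` module and the `common` helpers) are in
# place, and that the time axis is given in days, as the docstring requires.
if __name__ == '__main__':
    tm = numpy.arange(512) * 7.0  # weekly sampling, in days
    z = (numpy.sin(2 * numpy.pi * tm / 365.25) +
         0.5 * numpy.random.randn(tm.size))
    out = wavelet_analysis(z, tm, mother='Morlet', alpha=0.0)
    print(out['period'])  # equivalent Fourier periods, in days
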
Example #25
def plot_wavelet(t,
                 dat,
                 dt,
                 pl,
                 pr,
                 period_pltlim=None,
                 ax=None,
                 ax2=None,
                 stscale=2,
                 siglev=0.95,
                 cmap='viridis',
                 title='',
                 levels=None,
                 label='',
                 units='',
                 tunits='',
                 sav_img=False):
    import pycwt as wavelet
    from pycwt.helpers import find
    import numpy as np
    import matplotlib.pyplot as plt
    from copy import copy
    import numpy.ma as ma

    t_ = copy(t)
    t0 = t[0]
    # print(Time(t[-1:], format='plot_date').iso)
    # We also create a time array in years.
    N = dat.size
    t = np.arange(0, N) * dt + t0
    # print(Time(t[-1:], format='plot_date').iso)
    # We write the following code to detrend and normalize the input data by its
    # standard deviation. Sometimes detrending is not necessary and simply
    # removing the mean value is good enough. However, if your dataset has a well
    # defined trend, such as the Mauna Loa CO\ :sub:`2` dataset, it is strongly
    # advised to perform detrending.
    # Here, we fit a one-degree polynomial function and then subtract it from the
    # original data.
    p = np.polyfit(t - t0, dat, 1)
    dat_notrend = dat - np.polyval(p, t - t0)
    std = dat_notrend.std()  # Standard deviation
    var = std**2  # Variance
    dat_norm = dat_notrend / std  # Normalized dataset

    # The next step is to define some parameters of our wavelet analysis. We
    # select the mother wavelet, in this case the Morlet wavelet with
    # :math:`\omega_0=6`.
    mother = wavelet.Morlet(6)
    s0 = stscale * dt  # Starting scale: stscale times the sampling interval
    dj = 1 / 12  # Twelve sub-octaves per octave
    J = -1  # 7 / dj  # Seven powers of two with dj sub-octaves
    alpha, _, _ = wavelet.ar1(dat)  # Lag-1 autocorrelation for red noise

    # The following routines perform the wavelet transform and inverse wavelet
    # transform using the parameters defined above. Since we have normalized our
    # input time-series, we multiply the inverse transform by the standard
    # deviation.
    wave, scales, freqs, coi, fft, fftfreqs = wavelet.cwt(
        dat_norm, dt, dj, s0, J, mother)
    iwave = wavelet.icwt(wave, scales, dt, dj, mother) * std

    # We calculate the normalized wavelet and Fourier power spectra, as well as
    # the Fourier equivalent periods for each wavelet scale.
    power = (np.abs(wave))**2
    fft_power = np.abs(fft)**2
    period = 1 / freqs

    # We could stop at this point and plot our results. However we are also
    # interested in the power spectra significance test. The power is significant
    # where the ratio ``power / sig95 > 1``.
    signif, fft_theor = wavelet.significance(1.0,
                                             dt,
                                             scales,
                                             0,
                                             alpha,
                                             significance_level=siglev,
                                             wavelet=mother)
    sig95 = np.ones([1, N]) * signif[:, None]
    sig95 = power / sig95

    # Then, we calculate the global wavelet spectrum and determine its
    # significance level.
    glbl_power = power.mean(axis=1)
    dof = N - scales  # Correction for padding at edges
    glbl_signif, tmp = wavelet.significance(var,
                                            dt,
                                            scales,
                                            1,
                                            alpha,
                                            significance_level=siglev,
                                            dof=dof,
                                            wavelet=mother)

    # We also calculate the scale average between 2 years and 8 years, and its
    # significance level.
    sel = find((period >= pl) & (period < pr))
    Cdelta = mother.cdelta
    scale_avg = (scales * np.ones((N, 1))).transpose()
    scale_avg = power / scale_avg  # As in Torrence and Compo (1998) equation 24
    scale_avg = var * dj * dt / Cdelta * scale_avg[sel, :].sum(axis=0)
    scale_avg_signif, tmp = wavelet.significance(
        var,
        dt,
        scales,
        2,
        alpha,
        significance_level=siglev,
        dof=[scales[sel[0]], scales[sel[-1]]],
        wavelet=mother)

    # levels = [0.25, 0.5, 1, 2, 4, 8, 16,32]
    if levels is None:
        levels = np.linspace(0.0, 128., 256)
    # ax.contourf(t, np.log2(period), np.log2(power), np.log2(levels), extend='both', cmap=plt.cm.viridis)
    im = ax.contourf(t_,
                     np.array(period) * 24 * 60,
                     power,
                     levels,
                     extend='both',
                     cmap=cmap,
                     zorder=-20)
    # for pathcoll in im.collections:
    #     pathcoll.set_rasterized(True)
    ax.set_rasterization_zorder(-10)
    # im = ax.pcolormesh(t_, np.array(period) * 24 * 60, power,vmax=32.,vmin=0, cmap=cmap)
    # im = ax.contourf(t, np.array(period)*24*60, np.log2(power), np.log2(levels), extend='both', cmap=cmap)
    extent = [t_.min(), t_.max(), 0, max(period) * 24 * 60]
    # ax.contour(t, np.log2(period), sig95, [-99, 1], colors='k', linewidths=1, extent=extent)
    CS = ax.contour(t_,
                    np.array(period) * 24 * 60,
                    sig95 * siglev, [-99, 1.0 * siglev],
                    colors='k',
                    linewidths=1,
                    extent=extent)
    ax.clabel(CS, inline=1, fmt='%1.3f')
    ax.fill(np.concatenate(
        [t_, t_[-1:] + dt, t_[-1:] + dt, t_[:1] - dt, t_[:1] - dt]),
            np.concatenate([
                np.array(coi), [2**(1e-9)],
                np.array(period[-1:]),
                np.array(period[-1:]), [2**(1e-9)]
            ]) * 24 * 60,
            color='k',
            alpha=0.75,
            edgecolor='None',
            facecolor='k',
            hatch='x')
    # ### Note: Matplotlib does not display hatching when rendering to PDF. Here is a workaround.
    # ax.fill(np.concatenate([t_, t_[-1:] + dt, t_[-1:] + dt, t_[:1] - dt, t_[:1] - dt]),
    #         np.concatenate(
    #             [np.array(coi), [2 ** (1e-9)], np.array(period[-1:]), np.array(period[-1:]),
    #              [2 ** (1e-9)]]) * 24 * 60,
    #         color='None', alpha=1.0, edgecolor='k', hatch='x')
    # ax.set_title('b) {} Wavelet Power Spectrum ({})'.format(label, mother.name))
    #
    # ax.set_rasterization_zorder(20)
    # Yticks = np.arange(np.ceil(np.array(period.min()*24*60)), np.ceil(np.array(period.max()*24*60)))
    # ax.set_yticks(np.array(Yticks))
    # ax.set_yticklabels(Yticks)

    ax2.plot(glbl_signif, np.array(period) * 24 * 60, 'k--')
    # ax2.plot(var * fft_theor, np.array(period) * 24 * 60, '--', color='#cccccc')
    # ax2.plot(var * fft_power, np.array(1. / fftfreqs) * 24 * 60, '-', color='#cccccc',
    #          linewidth=1.)
    ax2.plot(var * glbl_power, np.array(period) * 24 * 60, 'k-', linewidth=1)
    mperiod = ma.masked_outside(np.array(period), period_pltlim[0],
                                period_pltlim[1])
    mpower = ma.masked_array(var * glbl_power, mask=mperiod.mask)
    # ax2.set_title('c) Global Wavelet Spectrum')
    ax2.set_xlabel(r'Power [({})^2]'.format(units))
    ax2.set_xlim([0, mpower.compressed().max() + var])
    # print(glbl_power)
    # ax2.set_ylim(np.array([period.min(), period.max()]))
    # ax2.set_yticks(np.array(Yticks))
    # ax2.set_yticklabels(Yticks)
    plt.setp(ax2.get_yticklabels(), visible=False)

    if period_pltlim:
        ax.set_ylim(np.array(period_pltlim) * 24 * 60)
    else:
        ax.set_ylim(np.array([period.min(), period.max()]) * 24 * 60)

    return im
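
# A minimal usage sketch (added for illustration; not part of the original
# example). plot_wavelet draws into two caller-supplied axes: `ax` for the
# spectrogram and `ax2` for the global spectrum. The series below is
# synthetic; dt is in days (the function converts periods to minutes), and
# pl/pr bound the scale-averaging band in the same period units.
if __name__ == '__main__':
    import numpy as np
    import matplotlib.pyplot as plt
    t = np.arange(1024) * 0.001  # time axis, in days
    dat = np.sin(2 * np.pi * t / 0.01) + 0.1 * np.random.randn(t.size)
    fig, (ax, ax2) = plt.subplots(1, 2, sharey=True, figsize=(9, 4))
    plot_wavelet(t, dat, 0.001, pl=0.005, pr=0.02,
                 period_pltlim=[0.002, 0.05], ax=ax, ax2=ax2)
    plt.show()
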
Example #26
def time_averaged_xWT(y1,
                      y2,
                      dt,
                      avg_type='phased',
                      high_f=0.11,
                      low_f=0.005,
                      min_scan_time=6 * 60):
    if avg_type == 'phased':
        import pycwt
        import numpy as np
        # Set a Morlet wavelet with omega = 6, which fixes the
        # time-frequency resolution of the wavelet and is recommended in
        # Chang and Glover.
        mother = pycwt.Morlet(6)
        s0 = 1 / high_f
        dj = 1 / 12  # Twelve sub-octaves per octave
        num_powers = 0
        while ((1 / high_f) * (2**num_powers) < 1 / low_f):
            num_powers += 1
        J = int(num_powers / dj)
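        # num_powers counts the octaves spanned between high_f and low_f,
        # i.e. ceil(log2(high_f / low_f)), so J = num_powers / dj is the
        # total number of scales at dj sub-octaves per octave.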

        xWT, coi, freq = cross_wavelet_transform(y1,
                                                 y2,
                                                 dt,
                                                 dj=dj,
                                                 s0=s0,
                                                 J=J,
                                                 wavelet=mother,
                                                 normalize=True)
        xWT_amp = np.abs(xWT)
        xWT_ang = np.angle(xWT)
        xWT_phased = xWT_amp * np.cos(xWT_ang)
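        # Note that xWT_amp * cos(xWT_ang) is simply the real part of the
        # cross-wavelet transform, np.real(xWT).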

        freqs_coi = 1 / coi
        #threshold the frequencies at the max frequencies of the wavelet
        pos_indices = freqs_coi > np.max(freq)
        neg_indices = freqs_coi < np.min(freq)
        for i in range(len(freqs_coi)):
            if pos_indices[i]:
                freqs_coi[i] = np.max(freq)
            elif neg_indices[i]:
                freqs_coi[i] = np.min(freq)

        within_freqs_coi = np.zeros(xWT.shape)
        for i in range(len(freqs_coi)):
            freq_index = 0
            while (freq[freq_index] > freqs_coi[i]):
                freq_index += 1
            within_freqs_coi[:freq_index, i] = 1

        num_coi_points = np.sum(within_freqs_coi, 1)
        valid_freq = num_coi_points > min_scan_time / dt
        if not valid_freq[0]:
            raise ValueError(
                'The minimum number of timepoints within the cone of influence is not met for any frequencies.'
            )

        xWT_phased_avg = np.zeros(np.sum(valid_freq))
        xWT_phased_var = np.zeros(np.sum(valid_freq))
        xWT_amp_avg = np.zeros(np.sum(valid_freq))
        xWT_amp_var = np.zeros(np.sum(valid_freq))
        xWT_ang_avg = np.zeros(np.sum(valid_freq))
        xWT_ang_var = np.zeros(np.sum(valid_freq))
        #calculate the temporal means and variabilities within the coi at frequencies
        # which respect the minimum time length requirement
        for i in range(len(valid_freq)):
            if valid_freq[i]:
                #get the indices of data falling within the coi
                indices = [
                    j for j, x in enumerate(within_freqs_coi[i, :]) if x
                ]
                xWT_phased_avg[i] = np.mean(xWT_phased[i, indices])
                xWT_phased_var[i] = np.std(xWT_phased[i, indices])
                xWT_amp_avg[i] = np.mean(xWT_amp[i, indices])
                xWT_amp_var[i] = np.std(xWT_amp[i, indices])
                xWT_ang_avg[i] = mean_resultant_length(xWT_ang[i, indices])
                xWT_ang_var[i] = circular_standard_deviation(xWT_ang[i,
                                                                     indices])

        return xWT_phased_avg, xWT_phased_var, xWT_amp_avg, xWT_amp_var, xWT_ang_avg, xWT_ang_var, freq[:np.sum(
            valid_freq)]


#    elif avg_type == 'quadrants':
#        this type will calculate a different average for each quadrant of
#        the phase in the WCT
    return None
Example #27
label = 'NINO3 SST'
units = 'degC'
t0 = 1871.0
dt = 0.25  # In years

N = dat.size
t = numpy.arange(0, N) * dt + t0

p = numpy.polyfit(t - t0, dat, 1)
dat_notrend = dat - numpy.polyval(p, t - t0)
std = dat_notrend.std()  # Standard deviation
var = std**2  # Variance
dat_norm = dat_notrend / std  # Normalized dataset

## Define wavelet parameters
mother = wavelet.Morlet(6)
s0 = 2 * dt  # Starting scale, in this case 2 * 0.25 years = 6 months
dj = 1 / 12  # Twelve sub-octaves per octave
J = 7 / dj  # Seven powers of two with dj sub-octaves
alpha, _, _ = wavelet.ar1(dat)  # Lag-1 autocorrelation for red noise

wave, scales, freqs, coi, fft, fftfreqs = wavelet.cwt(dat_norm, dt, dj, s0, J,
                                                      mother)
iwave = wavelet.icwt(wave, scales, dt, dj, mother) * std

power = (numpy.abs(wave))**2
fft_power = numpy.abs(fft)**2
period = 1 / freqs

power /= scales[:, None]
Example #28
def do_wavelet_transform(dat, dt):

    t0 = 0
    # dt = 0.25  # In years

    # We also create a time array in years.
    N = dat.size
    t = np.arange(0, N) * dt + t0
    '''
    We write the following code to detrend and normalize the input data by its
    standard deviation. Sometimes detrending is not necessary and simply
    removing the mean value is good enough. However, if your dataset has a well
    defined trend, such as the Mauna Loa CO\ :sub:`2` dataset, it is strongly
    advised to perform detrending.
    Here, we fit a one-degree polynomial function and then subtract it from the
    original data.
    '''
    p = np.polyfit(t - t0, dat, 1)
    dat_notrend = dat - np.polyval(p, t - t0)
    std = dat_notrend.std()  # Standard deviation
    var = std**2  # Variance
    dat_norm = dat_notrend / std  # Normalized dataset

    # The next step is to define some parameters of our wavelet analysis. We
    # select the mother wavelet, in this case the Morlet wavelet with
    # :math:`\omega_0=6`.
    mother = wavelet.Morlet(6)
    s0 = 2 * dt  # Starting scale, in this case 2 * 0.25 years = 6 months
    dj = 1 / 12  # Twelve sub-octaves per octave
    J = 7 / dj  # Seven powers of two with dj sub-octaves
    sr = pd.Series(dat)
    alpha = sr.autocorr(lag=1)  # Lag-1 autocorrelation for red noise
    '''
    The following routines perform the wavelet transform and inverse wavelet
    transform using the parameters defined above. Since we have normalized our
    input time-series, we multiply the inverse transform by the standard
    deviation.
    '''
    wave, scales, freqs, coi, fft, fftfreqs = wavelet.cwt(
        dat_norm, dt, dj, s0, J, mother)
    iwave = wavelet.icwt(wave, scales, dt, dj, mother) * std

    # We calculate the normalized wavelet and Fourier power spectra, as well as
    # the Fourier equivalent periods for each wavelet scale.
    power = (np.abs(wave))**2
    fft_power = np.abs(fft)**2
    period = 1 / freqs
    '''
    We could stop at this point and plot our results. However we are also
    interested in the power spectra significance test. The power is significant
    where the ratio ``power / sig95 > 1``.
    '''
    signif, fft_theor = wavelet.significance(1.0,
                                             dt,
                                             scales,
                                             0,
                                             alpha,
                                             significance_level=0.95,
                                             wavelet=mother)
    sig95 = np.ones([1, N]) * signif[:, None]
    sig95 = power / sig95

    # Then, we calculate the global wavelet spectrum and determine its
    # significance level.
    glbl_power = power.mean(axis=1)
    dof = N - scales  # Correction for padding at edges
    glbl_signif, tmp = wavelet.significance(var,
                                            dt,
                                            scales,
                                            1,
                                            alpha,
                                            significance_level=0.95,
                                            dof=dof,
                                            wavelet=mother)

    # We also calculate the scale average between 2 years and 8 years, and its
    # significance level.
    sel = find((period >= 2) & (period < 8))
    Cdelta = mother.cdelta
    scale_avg = (scales * np.ones((N, 1))).transpose()
    # As in Torrence and Compo (1998) equation 24
    scale_avg = power / scale_avg
    scale_avg = var * dj * dt / Cdelta * scale_avg[sel, :].sum(axis=0)
    scale_avg_signif, tmp = wavelet.significance(
        var,
        dt,
        scales,
        2,
        alpha,
        significance_level=0.95,
        dof=[scales[sel[0]], scales[sel[-1]]],
        wavelet=mother)

    return dat, t, \
        period, power, coi, wave, \
        scales, dt, dj, mother, sig95, \
        glbl_power, glbl_signif, \
        scale_avg_signif, scale_avg, \
        std, iwave, var, \
        fft_theor, fft_power, fftfreqs
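
# A minimal usage sketch (added for illustration; not part of the original
# example). It assumes the module-level imports the function body relies on
# (numpy as np, pandas as pd, pycwt as wavelet and pycwt.helpers.find).
if __name__ == '__main__':
    dat = np.random.randn(1024)  # synthetic quarterly series
    out = do_wavelet_transform(dat, dt=0.25)
    dat, t, period, power = out[:4]
    print(period.min(), period.max())
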
Example #29
    # get the log spaced frequencies
    numfreqs = 50
    nyq = np.floor_divide(sample_rate[0], 2.0)
    maxfreq = np.min([100, nyq])
    minfreq = 2
    freqs = np.logspace(np.log10(minfreq), np.log10(maxfreq), num=numfreqs)

    # make an empty ndarray to hold the freq * electrode * timepoint data
    powers_by_freq = np.zeros(shape=(len(freqs), og_shape[0], og_shape[1]))

    # convolve!
    for i, freq in enumerate(freqs):
        wav_transform = wavelet.cwt(data,
                                    1 / sample_rate[0],
                                    freqs=np.full(1, freq),
                                    wavelet=wavelet.Morlet(4))
        # get the power and reshape data back into original shape
        wav_transform = (np.abs(wav_transform[0])**2).reshape(og_shape)
        powers_by_freq[i] = np.log(wav_transform)

    # prep some variables for the robust regression done in parallel
    xs = np.log(freqs).reshape(-1, 1)
    midpoint = (np.log(maxfreq) - np.log(minfreq)) / 2

    nworkers = int(config['nnodes'] * config['ppn'] * 0.5)

    # get the indices for the chunks
    chunk_indices = array_split(powers_by_freq, nworkers, axis=2)

    mhq = Queue(nworkers)
    mh_list = []
Example #30
import pycwt
import numpy as np
from pylab import *

filename = 'sst_nino3.dat'
data = loadtxt(filename)

# remove mean
data = (data - np.nansum(data) / len(data))
data[np.isnan(data)] = 0

t = pycwt.cwt(data, pycwt.Morlet(), octaves=8, dscale=0.1)
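# Note (added): this snippet targets a different, class-based pycwt API
# (a positional wavelet argument plus octaves/dscale keywords, with
# bootstrap_signif and time_avg helpers) rather than the Torrence-and-Compo
# style pycwt.cwt(signal, dt, dj, s0, J, wavelet) used in the examples above.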

b = pycwt.bootstrap_signif(t, 200)
imshow(t.power(), aspect='auto')
contour(b, levels=[0.05], colors='w')
figure()
plot(pycwt.time_avg(t), t.scales)