Example no. 1
def test_swt_decomposition():
    x = [3, 7, 1, 3, -2, 6, 4, 6]
    db1 = pywt.Wavelet('db1')
    (cA3, cD3), (cA2, cD2), (cA1, cD1) = pywt.swt(x, db1, level=3)
    expected_cA1 = [7.07106781, 5.65685425, 2.82842712, 0.70710678,
                    2.82842712, 7.07106781, 7.07106781, 6.36396103]
    assert_allclose(cA1, expected_cA1)
    expected_cD1 = [-2.82842712, 4.24264069, -1.41421356, 3.53553391,
                    -5.65685425, 1.41421356, -1.41421356, 2.12132034]
    assert_allclose(cD1, expected_cD1)
    expected_cA2 = [7, 4.5, 4, 5.5, 7, 9.5, 10, 8.5]
    assert_allclose(cA2, expected_cA2, rtol=tol_double)
    expected_cD2 = [3, 3.5, 0, -4.5, -3, 0.5, 0, 0.5]
    assert_allclose(cD2, expected_cD2, rtol=tol_double, atol=1e-14)
    expected_cA3 = [9.89949494, ] * 8
    assert_allclose(cA3, expected_cA3)
    expected_cD3 = [0.00000000, -3.53553391, -4.24264069, -2.12132034,
                    0.00000000, 3.53553391, 4.24264069, 2.12132034]
    assert_allclose(cD3, expected_cD3)

    # level=1, start_level=1 decomposition should match level=2
    res = pywt.swt(cA1, db1, level=1, start_level=1)
    cA2, cD2 = res[0]
    assert_allclose(cA2, expected_cA2, rtol=tol_double)
    assert_allclose(cD2, expected_cD2, rtol=tol_double, atol=1e-14)

    coeffs = pywt.swt(x, db1)
    assert_(len(coeffs) == 3)
    assert_(pywt.swt_max_level(len(x)) == 3)
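The behaviour exercised above is easy to poke at interactively. A minimal sketch (assuming only NumPy and PyWavelets): pywt.swt needs a signal length divisible by 2**level and returns the coefficient pairs ordered from the coarsest level down to level 1.

import numpy as np
import pywt

x = np.array([3, 7, 1, 3, -2, 6, 4, 6], dtype=float)
print(pywt.swt_max_level(len(x)))      # 3 for a length-8 signal
coeffs = pywt.swt(x, 'db1', level=3)   # [(cA3, cD3), (cA2, cD2), (cA1, cD1)]
for lvl, (cA, cD) in zip(range(3, 0, -1), coeffs):
    # unlike the DWT, every SWT level keeps the full input length
    print(lvl, cA.shape, cD.shape)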
Example no. 2
def _collect_coefficients(data, genome, loci):
    l = loci
    scale = genome[l.scale]
    (temps, loads) = (data['Temperature'], data['Load'])
    loads_coeffs = pywt.swt(loads, 'haar', level=scale)
    temps_coeffs = pywt.swt(temps, 'haar', level=scale)

    # The first 2^scale datapoints cannot be used to predict because of lack of
    # history. scale+1 because of the smooth array.
    a = np.zeros((len(loads) - 2**scale, 2*(scale+1)*genome[l.Aj]))

    # Collect coefficients for each scale + smooth array.
    for i in range(len(a)):
        row = []
        # cAn, the smoothest of the smooth arrays.
        for k in range(1, genome[l.Aj]+1):
            row.append(loads_coeffs[-1][0][2**scale + i - 2**scale*(k-1)])
            row.append(temps_coeffs[-1][0][2**scale + i - 2**scale*(k-1)])
        # cD, the details.
        for j in range(1, scale+1):
            for k in range(1, genome[l.Aj]+1):
                row.append(loads_coeffs[j-1][1][2**scale + i - 2**j*(k-1)])
                row.append(temps_coeffs[j-1][1][2**scale + i - 2**j*(k-1)])

        a[i] = np.array(row)

    return a
Example no. 3
def muunna(sarake):
    aa = []
    bb = []
    # length
    # floor(length / 16000)
    loops = range(1,int(math.floor(len(sarake)/16000)))
    max_loops = int(len(sarake) - math.floor(len(sarake)/16000)*16000)
    
    wave2 = pywt.Wavelet('db4')
    
    sarake1 = np.array(sarake[0:16000])

    (cA6, cD6), (cA5, cD5), (cA4, cD4), (cA3, cD3), (cA2, cD2), (cA1, cD1) = pywt.swt(sarake1, wave2, level=6)
    aa = np.column_stack((cD1,cD2,cD3,cD4,cD5,cD6))
    
    for loop in loops:
    
        # loop over len(col) mod 16000 or 8000
        sarake1 = np.array(sarake[16000*loop:16000*(loop+1)])
    
        (cA6, cD6), (cA5, cD5), (cA4, cD4), (cA3, cD3), (cA2, cD2), (cA1, cD1) = pywt.swt(sarake1, wave2, level=6)
        bb = np.column_stack((cD1,cD2,cD3,cD4,cD5,cD6))
        aa = np.vstack((aa,bb))
    
    # finally, take the last 16000 samples
    # and place them at [(loops*16000):len(sarake)]
    sarake2 = np.array(sarake[(len(sarake) - 16000):len(sarake)])
    (cA6, cD6), (cA5, cD5), (cA4, cD4), (cA3, cD3), (cA2, cD2), (cA1, cD1) = pywt.swt(sarake2, wave2, level=6)
    bb = np.column_stack((cD1,cD2,cD3,cD4,cD5,cD6))[(len(cD1)-max_loops):len(cD1)]
    
    aa = np.vstack((aa,bb))
    #aa = np.concatenate((aa,bb), axis=0)
    #bb.append(np.concatenate(aa2, axis=0))
    return aa
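A usage sketch for muunna under stated assumptions (hypothetical input; it only shows the expected output shape): each 16000-sample window is decomposed with a 6-level db4 SWT and its detail bands cD1..cD6 become the six columns of the corresponding rows, so the result should have one row per input sample.

import numpy as np

col = np.random.randn(40000)   # hypothetical signal, at least 16000 samples long
features = muunna(col)
print(features.shape)          # expected (40000, 6): one row per sample, cD1..cD6 as columns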
Example no. 4
def test_swt_axis():
    x = [3, 7, 1, 3, -2, 6, 4, 6]

    db1 = pywt.Wavelet('db1')
    (cA2, cD2), (cA1, cD1) = pywt.swt(x, db1, level=2)

    # test cases use 2D arrays based on tiling x along an axis and then
    # calling swt along the other axis.
    for order in ['C', 'F']:
        # test SWT of 2D data along default axis (-1)
        x_2d = np.asarray(x).reshape((1, -1))
        x_2d = np.concatenate((x_2d, )*5, axis=0)
        if order == 'C':
            x_2d = np.ascontiguousarray(x_2d)
        elif order == 'F':
            x_2d = np.asfortranarray(x_2d)
        (cA2_2d, cD2_2d), (cA1_2d, cD1_2d) = pywt.swt(x_2d, db1, level=2)

        for c in [cA2_2d, cD2_2d, cA1_2d, cD1_2d]:
            assert_(c.shape == x_2d.shape)
        # each row should match the 1D result
        for row in cA1_2d:
            assert_array_equal(row, cA1)
        for row in cA2_2d:
            assert_array_equal(row, cA2)
        for row in cD1_2d:
            assert_array_equal(row, cD1)
        for row in cD2_2d:
            assert_array_equal(row, cD2)

        # test SWT of 2D data along other axis (0)
        x_2d = np.asarray(x).reshape((-1, 1))
        x_2d = np.concatenate((x_2d, )*5, axis=1)
        if order == 'C':
            x_2d = np.ascontiguousarray(x_2d)
        elif order == 'F':
            x_2d = np.asfortranarray(x_2d)
        (cA2_2d, cD2_2d), (cA1_2d, cD1_2d) = pywt.swt(x_2d, db1, level=2,
                                                      axis=0)

        for c in [cA2_2d, cD2_2d, cA1_2d, cD1_2d]:
            assert_(c.shape == x_2d.shape)
        # each column should match the 1D result
        for row in cA1_2d.transpose((1, 0)):
            assert_array_equal(row, cA1)
        for row in cA2_2d.transpose((1, 0)):
            assert_array_equal(row, cA2)
        for row in cD1_2d.transpose((1, 0)):
            assert_array_equal(row, cD1)
        for row in cD2_2d.transpose((1, 0)):
            assert_array_equal(row, cD2)

    # axis too large
    assert_raises(ValueError, pywt.swt, x, db1, level=2, axis=5)
Example no. 5
def c_dists(Y,use_swt=True,level_weights=False):
	w = pywt.Wavelet('sym2')
	if use_swt:
		L = pywt.swt_max_level(Y.shape[0])
		C = [pywt.swt(Y[:,i],w,level=L) for i in range(Y.shape[1])]
		C = [[list(reshape(l[0],-1)) + list(reshape(l[1],-1)) for l in c] for c in C]
	else:
		L = pywt.dwt_max_level(Y.shape[0],w)
		C = [pywt.wavedec(Y[:,i],w,level=L) for i in range(Y.shape[1])]
	if level_weights:
		if use_swt:
			raise NameError('No level weights with SWT')
		Wc = [1. for x in range(1,L+1)]
		D = zeros((len(C),len(C)))
		for i in range(len(C)):
			for j in range(i+1,len(C)):
				d = sum([distance.cosine(C[i][x],C[j][x])*Wc[x] for x in range(L)])/sum(Wc)
				D[i,j] = d
				D[j,i] = d
		return D
	else:
		Cn = []
		for c in C:
			cn = []
			for l in c:
				cn += list(l)
			Cn.append(cn)
		return abs(pdist(Cn,'cosine'))
Example no. 6
 def setup(self, n, wavelet):
     try:
         from pywt import iswt
     except ImportError:
         raise NotImplementedError("iswt not available")
     super(IswtTimeSuite, self).setup(n, wavelet)
     self.coeffs = pywt.swt(self.data, wavelet)
Example no. 7
def waveletDecomp(series):
    # c6, k6, k5, k4, k3, k2, k1 = pywt.wavedec(series, "db6", level=6)
    (c6, k6), (c5, k5), (c4, k4), (c3, k3), (c2, k2), (c1, k1) = pywt.swt(series, "haar", level=6)
    wave_matrix = []
    # Repeat each detail band a fixed number of times.  The original used
    # map(lambda n: wave_matrix.append(n), k), which is a no-op under Python 3
    # because map() is lazy; extend() keeps the intended behaviour.
    # (Commented-out variants instead repeated every value int(32/len(k)) times.)
    for i in range(10):
        wave_matrix.extend(k6)
    for i in range(10):
        wave_matrix.extend(k5)
    for i in range(11):
        wave_matrix.extend(k4)
    for i in range(11):
        wave_matrix.extend(k3)
    for i in range(11):
        wave_matrix.extend(k2)
    for i in range(11):
        wave_matrix.extend(k1)
    return wave_matrix
Example no. 8
def test_swt_iswt_integration():
    # This function performs a round-trip swt/iswt transform test on
    # all available types of wavelets in PyWavelets - except the
    # 'dmey' wavelet. The latter has been excluded because it does not
    # produce very precise results. This is likely due to the fact
    # that the 'dmey' wavelet is a discrete approximation of a
    # continuous wavelet. All wavelets are tested up to 3 levels. The
    # test validates neither swt nor iswt as such, but it does ensure
    # that they are each other's inverse.

    max_level = 3
    wavelets = pywt.wavelist()
    if 'dmey' in wavelets:
        # The 'dmey' wavelet seems to be a bit special - disregard it for now
        wavelets.remove('dmey')
    for current_wavelet_str in wavelets:
        current_wavelet = pywt.Wavelet(current_wavelet_str)
        input_length_power = int(np.ceil(np.log2(max(
            current_wavelet.dec_len,
            current_wavelet.rec_len))))
        input_length = 2**(input_length_power + max_level - 1)
        X = np.arange(input_length)
        coeffs = pywt.swt(X, current_wavelet, max_level)
        Y = pywt.iswt(coeffs, current_wavelet)
        assert_allclose(Y, X, rtol=1e-5, atol=1e-7)
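A stripped-down round trip in the same spirit, handy as a quick smoke test (a sketch assuming only NumPy and PyWavelets):

import numpy as np
import pywt

x = np.arange(32, dtype=float)          # length divisible by 2**3
coeffs = pywt.swt(x, 'db2', level=3)
x_rec = pywt.iswt(coeffs, 'db2')
print(np.allclose(x, x_rec))            # True up to floating-point error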
Example no. 9
def get_waveletfeatures(data, w, use_dwt=True):
    #Show dwt or swt coefficients for given data and wavelet.
    w = pywt.Wavelet(w)
    a = data
    ca = []
    cd = []

    if use_dwt:
        for i in range(5):
            (a, d) = pywt.dwt(a, w, mode)
            ca.append(a)
            cd.append(d)
    else:
        coeffs = pywt.swt(data, w, 5)  # [(cA5, cD5), ..., (cA1, cD1)]
        for a, d in reversed(coeffs):
            ca.append(a)
            cd.append(d)
    
    wave_features = []
    for i in range(len(ca)): #ca1 - ca5
        c_Dsquares = []
        for j in range(len(cd[i])):
            c_Dsquares.append((cd[i][j])**2)
        c_Dsumsquares=sum(c_Dsquares)
        wave_features.append(c_Dsumsquares)
    return wave_features  # Returns one summed-squared-detail value per level
Example no. 10
def random():
    fig, (ax_w, ax_f) = plt.subplots(2)

    distributions = [.2, .4, .6, .8, .99]
    colors = ['b', 'r', 'g', 'm', 'c']

    for d,c in zip(distributions, colors):
        x_wavelet, y_wavelet, x_fourier, y_fourier = [],[],[],[]
        for i in range(int(1e3)):
            signal = np.random.rand(int(1e5)) > d
            (cA2, cD2), (cA1, cD1) = pywt.swt(signal, 'haar', level=2)
            x_wavelet.append(np.mean(cA1))
            y_wavelet.append(np.mean(cA2))

            fourier = np.fft.fft(signal)
            freqs = np.fft.fftfreq(len(fourier))
            peaks = argrelextrema(np.abs(fourier), np.greater)

            x_fourier.append(freqs[peaks[0][0]])
            y_fourier.append(freqs[peaks[0][1]])
            
        ax_w.scatter(x_wavelet, y_wavelet, marker='o', c=c, label='w: rand(1e5) > {}'.format(d), linewidth=0)
        ax_f.scatter(x_fourier, y_fourier, marker='+', c=c, label='f: rand(1e5) > {}'.format(d))

    ax_w.set_title('Random signals')
    ax_w.set_xlabel('Wavelet transform')
    ax_f.set_xlabel('Fourier transform')
    #plt.legend()

    plt.show()
Example no. 11
def stationary_hard_filter (y, sigma, tau, level=3):
    threshold= tau * sigma    
    coeffs = pywt.swt(y, 'db6', level)
    hcoeffs =[]
    for scale, x in enumerate(coeffs):
        hcoeffs.append(pywt.thresholding.hard(x, threshold))
    return iswt(hcoeffs, 'db6') 
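pywt.thresholding was removed from later PyWavelets releases; a sketch of the same hard-threshold filter written against the current pywt.threshold / pywt.iswt API (here thresholding only the detail coefficients, which is a choice, not something the original guarantees):

import pywt

def stationary_hard_filter_current(y, sigma, tau, level=3, wavelet='db6'):
    # sketch: hard-threshold the detail coefficients only, then invert
    threshold = tau * sigma
    coeffs = pywt.swt(y, wavelet, level=level)
    hcoeffs = [(cA, pywt.threshold(cD, threshold, mode='hard'))
               for cA, cD in coeffs]
    return pywt.iswt(hcoeffs, wavelet)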
Example no. 12
def test_iswt_mixed_dtypes():
    # Mixed precision inputs give double precision output
    x_real = np.arange(16).astype(np.float64)
    x_complex = x_real + 1j*x_real
    wav = 'sym2'
    for dtype1, dtype2 in [(np.float64, np.float32),
                           (np.float32, np.float64),
                           (np.float16, np.float64),
                           (np.complex128, np.complex64),
                           (np.complex64, np.complex128)]:

        if dtype1 in [np.complex64, np.complex128]:
            x = x_complex
            output_dtype = np.complex128
        else:
            x = x_real
            output_dtype = np.float64

        coeffs = pywt.swt(x, wav, 2)
        # different precision for the approximation coefficients
        coeffs[0] = [coeffs[0][0].astype(dtype1),
                     coeffs[0][1].astype(dtype2)]
        y = pywt.iswt(coeffs, wav)
        assert_equal(output_dtype, y.dtype)
        assert_allclose(y, x, rtol=1e-3, atol=1e-3)
Example no. 13
def test_swt_default_level_by_axis():
    # make sure default number of levels matches the max level along the axis
    wav = 'db2'
    x = np.ones((2**3, 2**4, 2**5))
    for axis in (0, 1, 2):
        sdec = pywt.swt(x, wav, level=None, start_level=0, axis=axis)
        assert_equal(len(sdec), pywt.swt_max_level(x.shape[axis]))
Example no. 14
def calculate_suitable_lvl(data, wv, r, swt=True):
    # stationary
    if swt:
        max_lvl = pywt.swt_max_level(len(data))

        lvl = 1
        ent = []
        pre_e = entropy(pywt.swt(data, wv, lvl), r)
        ent.append(pre_e)
        lvl += 1
        while True:
            new_e = entropy(pywt.swt(data, wv, lvl), r)
            ent.append(new_e)
            if lvl < max_lvl:
                lvl += 1
            else:
                break

        e_sorted = sorted(ent[:])
        median = e_sorted[len(e_sorted) // 2]
        lvl = ent.index(median) + 1
    # discrete
    else:
        lvl = 1
        data_e = entropy(data, r, True)
        max_lvl = pywt.dwt_max_level(len(data), wv)

        while True:
            new_e = entropy(pywt.dwt(data, wv, lvl)[lvl - 1], r, True)
            if new_e > data_e:
                break
            elif lvl == max_lvl:
                break
            else:
                lvl += 1

    if lvl == max_lvl:
        pass

    return lvl
Example no. 15
def get_cdpp(total_ratio):
    length = total_ratio.shape[0]
    level=12
    cdpp_list = []
    for trail_length in [6, 12, 24]:
        signal_origin = np.zeros(length)
        for i in range(1500, 1500+trail_length):
            signal_origin[i] = 1.

        ratio = signal_extension(total_ratio)
        signal = signal_extension(signal_origin)

        wavelet = pywt.Wavelet('db6')

        swc_ratio = pywt.swt(ratio, wavelet, level)
        swc_signal = pywt.swt(signal, wavelet, level)
        x = []
        s = []
        for i in range(0, level):
            x.append(swc_ratio[level-1-i][1])
            s.append(swc_signal[level-1-i][1])
        x.append(swc_ratio[0][0])
        s.append(swc_signal[0][0])

        K = 50*trail_length
        sigma = variance(x, K)
        
        M = len(s)
        D = np.zeros(8192)
        for i in range(1, M+1):
            power = np.min([i, M-1])
            D += (2**(-power))*np.convolve(sigma[i-1]**(-1), s[i-1]**2, 'same')
        cdpp = (1e6)*np.sqrt(D)**(-1)
        rms_cdpp = math.sqrt(np.mean(cdpp**2))
        cdpp_list.append(rms_cdpp)
    return cdpp_list
Example no. 16
def test_swt_decomposition():
    x = [3, 7, 1, 3, -2, 6, 4, 6]
    db1 = pywt.Wavelet("db1")
    (cA2, cD2), (cA1, cD1) = pywt.swt(x, db1, level=2)
    assert_allclose(
        cA1, [7.07106781, 5.65685425, 2.82842712, 0.70710678, 2.82842712, 7.07106781, 7.07106781, 6.36396103]
    )
    assert_allclose(
        cD1, [-2.82842712, 4.24264069, -1.41421356, 3.53553391, -5.65685425, 1.41421356, -1.41421356, 2.12132034]
    )
    expected_cA2 = [7, 4.5, 4, 5.5, 7, 9.5, 10, 8.5]
    assert_allclose(cA2, expected_cA2, rtol=1e-12)
    expected_cD2 = [3, 3.5, 0, -4.5, -3, 0.5, 0, 0.5]
    assert_allclose(cD2, expected_cD2, rtol=1e-12, atol=1e-14)

    # level=1, start_level=1 decomposition should match level=2
    res = pywt.swt(cA1, db1, level=1, start_level=1)
    cA2, cD2 = res[0]
    assert_allclose(cA2, expected_cA2, rtol=1e-12)
    assert_allclose(cD2, expected_cD2, rtol=1e-12, atol=1e-14)

    coeffs = pywt.swt(x, db1)
    assert_(len(coeffs) == 3)
    assert_(pywt.swt_max_level(len(x)) == 3)
Example no. 17
def dwt_swt_plot(data, w, filename,DWT,mode='sp1',lev=4):
    w = pywt.Wavelet(w)
    a = data
    ca = []
    cd = []

    if DWT:
        for i in xrange(5):
            (a, d) = pywt.dwt(a, w, mode=mode)
            ca.append(a)
            cd.append(d)
    else:
        coeffs = pywt.swt(data, w) #level=lev # [(cA5, cD5), ..., (cA1, cD1)]
        for a, d in reversed(coeffs):
            ca.append(a)
            cd.append(d)
                
    pylab.figure()
    ax_main = pylab.subplot(len(ca) + 1, 1, 1)
    pylab.title(filename)
    ax_main.plot(data)
    pylab.xlim(0, len(data) - 1)
    
    for i, x in enumerate(ca):
        ax = pylab.subplot(len(ca) + 1, 2, 3 + i * 2)
        ax.plot(x, 'r')
        if DWT:
            pylab.xlim(0, len(x) - 1)
        else:
            pylab.xlim(w.dec_len * i, len(x) - 1 - w.dec_len * i)
            pylab.ylabel("A%d" % (i + 1))
                        
    for i, x in enumerate(cd):
        ax = pylab.subplot(len(cd) + 1, 2, 4 + i * 2)
        ax.plot(x, 'g')
        pylab.xlim(0, len(x) - 1)
        if DWT:
            pylab.ylim(min(0, 1.4 * min(x)), max(0, 1.4 * max(x)))
        else:  # SWT
            pylab.ylim(
                min(0, 2 * min(
                    x[w.dec_len * (1 + i):len(x) - w.dec_len * (1 + i)])),
                max(0, 2 * max(
                    x[w.dec_len * (1 + i):len(x) - w.dec_len * (1 + i)]))
                                                                )
            pylab.ylabel("D%d" % (i + 1))
            
    pylab.savefig(filename+'.pdf')
Example no. 18
 def do_swt_and_find_characteristic_points(self, signal, sample_rate,
                                           max_bpm):
     self.reinit()
     swt = pywt.swt(data=signal,
                    wavelet=self.wavelet,
                    level=4,
                    start_level=0,
                    axis=-1)
     levels = len(swt)
     coeffs = [
         swt[levels - 1][1], swt[levels - 2][1], swt[levels - 3][1],
         swt[levels - 4][1]
     ]
     ps, qs, rs, ss, ts = self.find_characteristic_points(coeffs=coeffs,
                                                          start_sample=0)
     return ps, qs, rs, ss, ts, coeffs
Example no. 19
def test_swt_dtypes():
    wavelet = pywt.Wavelet('haar')
    for dt_in, dt_out in zip(dtypes_in, dtypes_out):
        errmsg = "wrong dtype returned for {0} input".format(dt_in)

        # swt
        x = np.ones(8, dtype=dt_in)
        (cA2, cD2), (cA1, cD1) = pywt.swt(x, wavelet, level=2)
        assert_(cA2.dtype == cD2.dtype == cA1.dtype == cD1.dtype == dt_out,
                "swt: " + errmsg)

        # swt2
        x = np.ones((8, 8), dtype=dt_in)
        cA, (cH, cV, cD) = pywt.swt2(x, wavelet, level=1)[0]
        assert_(cA.dtype == cH.dtype == cV.dtype == cD.dtype == dt_out,
                "swt2: " + errmsg)
Example no. 20
def test_swt_variance_and_energy_preservation():
    """Verify that the 1D SWT partitions variance among the coefficients."""
    # When norm is True and the wavelet is orthogonal, the sum of the
    # variances of the coefficients should equal the variance of the signal.
    wav = 'db2'
    rstate = np.random.RandomState(5)
    x = rstate.randn(256)
    coeffs = pywt.swt(x, wav, trim_approx=True, norm=True)
    variances = [np.var(c) for c in coeffs]
    assert_allclose(np.sum(variances), np.var(x))

    # also verify L2-norm energy preservation property
    assert_allclose(np.linalg.norm(x), np.linalg.norm(np.concatenate(coeffs)))

    # non-orthogonal wavelet with norm=True raises a warning
    assert_warns(UserWarning, pywt.swt, x, 'bior2.2', norm=True)
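The trim_approx=True call above changes the layout of the result from the usual list of (cA, cD) pairs to a wavedec-like list [cA_n, cD_n, ..., cD_1], which is what makes np.concatenate(coeffs) meaningful; norm=True rescales the filters so an orthogonal wavelet preserves energy. A minimal sketch:

import numpy as np
import pywt

x = np.random.randn(256)
coeffs = pywt.swt(x, 'db2', trim_approx=True, norm=True)
print(len(coeffs))                               # swt_max_level(256) + 1 = 9 entries
print(all(c.shape == x.shape for c in coeffs))   # True: one approximation, then the details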
Example no. 22
def test_swt_roundtrip_dtypes():
    # verify perfect reconstruction for all dtypes
    rstate = np.random.RandomState(5)
    wavelet = pywt.Wavelet('haar')
    for dt_in, dt_out in zip(dtypes_in, dtypes_out):
        # swt, iswt
        x = rstate.standard_normal((8, )).astype(dt_in)
        c = pywt.swt(x, wavelet, level=2)
        xr = pywt.iswt(c, wavelet)
        assert_allclose(x, xr, rtol=1e-6, atol=1e-7)

        # swt2, iswt2
        x = rstate.standard_normal((8, 8)).astype(dt_in)
        c = pywt.swt2(x, wavelet, level=2)
        xr = pywt.iswt2(c, wavelet)
        assert_allclose(x, xr, rtol=1e-6, atol=1e-7)
Example no. 23
def denoise_waveform(wf_array, flatTimeSamples):
  # should already be a numpy array
  threshold_list = get_threshold_list()

  swt_output = pywt.swt(wf_array, wl, level=levels)
  # threshold the SWT coefficients
  apply_threshold(swt_output, 1., threshold_list)
  # inverse transform
  cA_thresh = iswt(swt_output, wl)
  
  wf_array = cA_thresh

  #re-baseline-subtract
  wf_array -= np.mean(wf_array[:flatTimeSamples])

  return wf_array
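get_threshold_list and apply_threshold are project helpers that are not shown here. A hypothetical reading of apply_threshold, consistent with how it is called (one threshold per level, coefficients modified in place before iswt), might be:

import pywt

def apply_threshold(swt_output, scaler, threshold_list):
    # hypothetical helper: soft-threshold each level's detail coefficients in place
    for (cA, cD), thresh in zip(swt_output, threshold_list):
        cD[:] = pywt.threshold(cD, scaler * thresh, mode='soft')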
Example no. 25
def plot(data, w, title):
    w = pywt.Wavelet(w)
    a = data
    ca = []
    cd = []

    if DWT:
        for i in range(5):
            (a, d) = pywt.dwt(a, w, mode)
            ca.append(a)
            cd.append(d)
    else:
        coeffs = pywt.swt(data, w, 5)  # [(cA5, cD5), ..., (cA1, cD1)]
        for a, d in reversed(coeffs):
            ca.append(a)
            cd.append(d)

    pylab.figure()
    ax_main = pylab.subplot(len(ca) + 1, 1, 1)
    pylab.title(title)
    ax_main.plot(data)
    pylab.xlim(0, len(data) - 1)

    for i, x in enumerate(ca):
        ax = pylab.subplot(len(ca) + 1, 2, 3 + i * 2)
        ax.plot(x, 'r')
        if DWT:
            pylab.xlim(0, len(x) - 1)
        else:
            pylab.xlim(w.dec_len * i, len(x) - 1 - w.dec_len * i)
        pylab.ylabel("A%d" % (i + 1))

    for i, x in enumerate(cd):
        ax = pylab.subplot(len(cd) + 1, 2, 4 + i * 2)
        ax.plot(x, 'g')
        pylab.xlim(0, len(x) - 1)
        if DWT:
            pylab.ylim(min(0, 1.4 * min(x)), max(0, 1.4 * max(x)))
        else:  # SWT
            pylab.ylim(
                min(
                    0, 2 * min(x[w.dec_len * (1 + i):len(x) - w.dec_len *
                                 (1 + i)])),
                max(
                    0, 2 * max(x[w.dec_len * (1 + i):len(x) - w.dec_len *
                                 (1 + i)])))
        pylab.ylabel("D%d" % (i + 1))
Example no. 26
    def _swt(self, sequence):
        """ Use stationary wavelet transform method. """

        details_filename = self.options.out_filename + '_details.txt'
        details_handle = open(details_filename, 'w')

        padded_sequence = self._pad_sequence(sequence)
        sequence_encoding = self.encode_sequence(padded_sequence)
        level = pywt.swt_max_level(len(sequence_encoding))
        coeffs = pywt.swt(sequence_encoding, 'haar', level)

        for (cA, cD) in coeffs:
            approx_handle.write('\t'.join(str(a) for a in cA) + '\n')
            details_handle.write('\t'.join(str(d) for d in cD) + '\n')

        details_handle.close()
        return details_filename
Example no. 27
	def _swt(self, sequence):
		""" Use stationary wavelet transform method. """

		details_filename = self.options.out_filename + '_details.txt'
		details_handle = open(details_filename, 'w')

		padded_sequence = self._pad_sequence(sequence)
		sequence_encoding = self.encode_sequence(padded_sequence)
		level = pywt.swt_max_level(len(sequence_encoding))
		coeffs = pywt.swt(sequence_encoding, 'haar', level)

		for (cA, cD) in coeffs:
			approx_handle.write('\t'.join(str(a) for a in cA) + '\n')
			details_handle.write('\t'.join(str(d) for d in cD) + '\n')

		details_handle.close()
		return details_filename
Example no. 28
def denoise(x):
    output = pywt.swt(x, 'db2', level=2)
    threshold_list = measure_threshold(output)
    apply_threshold(output, 2.1, threshold_list)
    x_hat = iswt(output, 'db2')
    x_hat = smooth(x_hat)

    # cA, cD = pywt.dwt(x, 'db4')
    #
    #
    # # print(cA)
    # # print(cD)
    #
    # # print(cA)
    # x_hat = pywt.idwt(None, cD, 'db4')

    return x_hat
Example no. 29
def _ecg_findpeaks_kalidas(signal, sampling_rate=1000):
    """From https://github.com/berndporr/py-ecg-detectors/

    - Vignesh Kalidas and Lakshman Tamil (2017). Real-time QRS detector using Stationary Wavelet Transform
      for Automated ECG Analysis. In: 2017 IEEE 17th International Conference on Bioinformatics and
      Bioengineering (BIBE). Uses the Pan and Tompkins thresholding.

    """
    # Try loading pywt
    try:
        import pywt
    except ImportError:
        raise ImportError(
            "NeuroKit error: ecg_findpeaks(): the 'PyWavelets' module is required for"
            " this method to run. Please install it first (`pip install PyWavelets`)."
        )

    swt_level = 3
    padding = -1
    for i in range(1000):
        if (len(signal) + i) % 2**swt_level == 0:
            padding = i
            break

    if padding > 0:
        signal = np.pad(signal, (0, padding), "edge")
    elif padding == -1:
        print("Padding greater than 1000 required\n")

    swt_ecg = pywt.swt(signal, "db3", level=swt_level)
    swt_ecg = np.array(swt_ecg)
    swt_ecg = swt_ecg[0, 1, :]

    squared = swt_ecg * swt_ecg

    f1 = 0.01 / sampling_rate
    f2 = 10 / sampling_rate

    b, a = scipy.signal.butter(3, [f1 * 2, f2 * 2], btype="bandpass")
    filtered_squared = scipy.signal.lfilter(b, a, squared)

    filt_peaks = _ecg_findpeaks_peakdetect(filtered_squared, sampling_rate)

    filt_peaks = np.array(filt_peaks, dtype="int")
    return filt_peaks
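The padding loop above just looks for the smallest right-pad that makes the signal length a multiple of 2**swt_level, which pywt.swt requires; the same thing can be computed in closed form (a sketch, not part of the original detector):

import numpy as np

def pad_to_swt_multiple(signal, swt_level=3):
    # sketch: smallest pad so that len(signal) % 2**swt_level == 0
    padding = (-len(signal)) % 2**swt_level
    return np.pad(signal, (0, padding), "edge") if padding else signal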
Example no. 30
 def decompose(self, signal):
     pth = ['']
     self._coeff_dict[''] = np.squeeze(signal)
     for l in range(self._max_level):
         pth_new = []
         for p in pth:
             coeff = pywt.swt(self._coeff_dict[p],
                              wavelet=self._wavelet,
                              level=self._max_level - len(p),
                              start_level=len(p))
             p_run = p
             for i, C in enumerate(coeff[::-1]):
                 self._coeff_dict[p_run + 'A'] = C[0]
                 self._coeff_dict[p_run + 'D'] = C[1]
                 if i < len(coeff) - 1 and len(p_run) < self._max_level - 1:
                     pth_new.append(p_run + 'D')
                     p_run = p_run + 'A'
         pth = list(pth_new)
Example no. 31
def pywt_swt(signal):
    """
    :param signal: shape (time_step, 1)
    :return:
    """
    signal = np.reshape(signal, (-1, ))
    if len(signal) % 2 != 0:
        signal = np.concatenate([signal, np.array([0])])
    ca = []
    cd = []
    w = pywt.Wavelet('db4')
    coeffs = pywt.swt(signal, w, 1)  # [(cA1, cD1)]
    for a, d in reversed(coeffs):
        ca.append(a)
        cd.append(d)
    signal = pywt.waverec(ca, w)
    signal = np.reshape(signal, (-1, 1))
    return signal
Example no. 32
    def nonQRS_swt(self,rawsig,expert_marklist,wavelet = 'db6',MaxLevel = 9):
        '''Get SWT without QRS regions; modifies rawsig within this function.'''
        # Get Swt coef for rawsig.
        rawsig = self.getNonQRSsig(rawsig,expert_marklist)
        rawsig = self.crop_data_for_swt(rawsig)

        coeflist = pywt.swt(rawsig,wavelet,MaxLevel)
        cAlist,cDlist = zip(*coeflist)
        # append to self.cDlist
        self.cAlist = []
        self.cDlist = []
        for ind in xrange(0,len(cAlist)):
            self.cAlist.append(cAlist[ind].tolist())
            self.cDlist.append(cDlist[ind].tolist())

        self.cDlist = self.cDlist[::-1]
        self.cAlist = self.cAlist[::-1]
        return None
Example no. 33
def plot_coeffs(data, w, title, use_dwt=True):
    """Show dwt or swt coefficients for given data and wavelet."""
    w = pywt.Wavelet(w)
    a = data
    ca = []
    cd = []

    if use_dwt:
        for i in range(5):
            (a, d) = pywt.dwt(a, w, mode)
            ca.append(a)
            cd.append(d)
    else:
        coeffs = pywt.swt(data, w, 5)  # [(cA5, cD5), ..., (cA1, cD1)]
        for a, d in reversed(coeffs):
            ca.append(a)
            cd.append(d)

    fig = plt.figure()
    ax_main = fig.add_subplot(len(ca) + 1, 1, 1)
    ax_main.set_title(title)
    ax_main.plot(data)
    ax_main.set_xlim(0, len(data) - 1)

    for i, x in enumerate(ca):
        ax = fig.add_subplot(len(ca) + 1, 2, 3 + i * 2)
        ax.plot(x, 'r')
        ax.set_ylabel("A%d" % (i + 1))
        if use_dwt:
            ax.set_xlim(0, len(x) - 1)
        else:
            ax.set_xlim(w.dec_len * i, len(x) - 1 - w.dec_len * i)

    for i, x in enumerate(cd):
        ax = fig.add_subplot(len(cd) + 1, 2, 4 + i * 2)
        ax.plot(x, 'g')
        ax.set_ylabel("D%d" % (i + 1))
        # Scale axes
        ax.set_xlim(0, len(x) - 1)
        if use_dwt:
            ax.set_ylim(min(0, 1.4 * min(x)), max(0, 1.4 * max(x)))
        else:
            vals = x[w.dec_len * (1 + i):len(x) - w.dec_len * (1 + i)]
            ax.set_ylim(min(0, 2 * min(vals)), max(0, 2 * max(vals)))
Example no. 35
def plot(data, w, title):
	w = pywt.Wavelet(w)
	a = data
	ca = []
	cd = []
	if DWT:
		for i in xrange(5):
			(a, d) = pywt.dwt(a, w, mode)
			ca.append(a)
			cd.append(d)
	else:
		for a,d in pywt.swt(data, w, 5):
			ca.append(a)
			cd.append(d)
			
	pylab.figure()
	ax_main = pylab.subplot(len(ca)+1,1,1)
	pylab.title(title)
	ax_main.plot(data)
	pylab.xlim(0, len(data)-1)

	for i, x in enumerate(ca):
		#print len(data), len(x), len(data) / (2**(i+1))
		lims = -(len(data) / (2.**(i+1)) - len(x)) / 2.
		ax = pylab.subplot(len(ca)+1, 2, 3+i*2)
		ax.plot(x, 'r')
		if DWT:
			pylab.xlim(0, len(x)-1)
		else:
			pylab.xlim(w.dec_len*i, len(x)-1-w.dec_len*i)
		pylab.ylabel("A%d" % (i+1))

	for i, x in enumerate(cd):
		ax = pylab.subplot(len(cd)+1, 2, 4+i*2)
		ax.plot(x, 'g')
		pylab.xlim(0, len(x)-1)
		if DWT:
			pylab.ylim(min(0,1.4*min(x)), max(0,1.4*max(x)))
		else: #SWT
			pylab.ylim(
				min(0, 2*min(x[w.dec_len*(1+i):len(x)-w.dec_len*(1+i)])),
				max(0, 2*max(x[w.dec_len*(1+i):len(x)-w.dec_len*(1+i)]))
			)
		pylab.ylabel("D%d" % (i+1))
Example no. 36
    def _detect_r_peaks(self, ds):
        wav = pywt.Wavelet('haar')
        # Multi resolution decomposition of the signal to improve SNR
        coeffs = pywt.swt(ds.data.flatten(),
                          wavelet=wav,
                          level=self._dwt_level)
        det = coeffs[3][1]**2
        # Threshold where to define a peak as R
        thr = 1.5 * np.std(det)
        cursor = 0

        while cursor < len(det):
            if det[cursor] >= thr:
                # Add peak as candidate
                ds.r.append(cursor)
                # Skip next samples due to physiological refractory period
                cursor += self._ref_time
            else:
                cursor += 1
Example no. 37
    def wvt_proc(self, show=False):
        num_level = int(np.log2(self.largest_base))
        slct_lvl = 3

        euclidean_distance = get_euclidean_distance(self.interp_x, self.interp_y, self.interp_z)
        wlt = pywt.Wavelet('db6')
        new_sig = pywt.swt(euclidean_distance, wavelet=wlt, level=num_level)

        # INSERT FOR LOOP FOR PLOTS
        if show:
            plt.figure()
            for i in range(1, num_level + 1):
                plt.subplot(4, 3, i)
                plt.title('Wavelet coefficient' + str(i))
                plt.plot(self.t_i, new_sig[-i][0])
            plt.pause(0.00001)

        # Get peaks-locs, compute interval
        loc_idx = pk.indexes(new_sig[-slct_lvl][0], min_dist=6, thres=0.0)
        if show:
            # Plot slected wavelet coefficient peaks
            plt.subplot(4, 3, slct_lvl)
            plt.plot(self.t_i[loc_idx], new_sig[-slct_lvl][0][loc_idx])
            plt.pause(0.000001)
        peaks = {'Time': loc_idx * 1.0 / 20.0}
        peaks_df = pd.DataFrame(peaks)

        # Compute mean interval and get heart rate with sliding window
        hr = (60.0 / (peaks_df.diff().mean().values)).flatten()
        hr_time = peaks_df.values.flatten()

        if show:
            # Show analysis
            plt.figure()
            plt.plot(hr_time, hr)
            plt.legend(['Kinect measurement', 'ECG Ground truth'])
            plt.pause(0.000001)

        if np.isnan(hr):
            assert True
        return np.mean(hr)
Example no. 38
def wavelet_t(series,std):
    name = series.name
    idx = series.index
    signal = series.values
    if std: signal = (signal - signal.mean())/(signal.std())

    coeff = pywt.swt(signal,'db4',level=3)
    coeff = np.array(coeff)

    cA3,cD3 = coeff[0][0],coeff[0][1]
    _,cD2 = coeff[1][0],coeff[1][1]
    _,cD1 = coeff[2][0],coeff[2][1]
    #----------------------------
    dict_data = {
            name:signal,
            '{}_cA3'.format(name): cA3,
            '{}_cD3'.format(name): cD3,
            '{}_cD2'.format(name): cD2,
            '{}_cD1'.format(name): cD1}
    wt = pd.DataFrame(dict_data,dtype='float32',index=idx)
    return wt
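A usage sketch for wavelet_t on a hypothetical series (its length must be a multiple of 2**3 = 8 for the level-3 SWT):

import numpy as np
import pandas as pd

s = pd.Series(np.random.randn(64), name='price')   # hypothetical series
wt = wavelet_t(s, std=True)
# expected columns: ['price', 'price_cA3', 'price_cD3', 'price_cD2', 'price_cD1']
print(list(wt.columns))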
Example no. 39
    def wavelet_transform(self):

        unfiltered_ecg = self.raw

        swt_level = 3
        padding = -1
        for i in range(1000):
            if (len(unfiltered_ecg) + i) % 2 ** swt_level == 0:
                padding = i
                break

        if padding > 0:
            unfiltered_ecg = np.pad(unfiltered_ecg, (0, padding), 'edge')
        elif padding == -1:
            print("Padding greater than 1000 required\n")

        swt_ecg = pywt.swt(unfiltered_ecg, 'db3', level=swt_level)
        swt_ecg = np.array(swt_ecg)
        swt_ecg = swt_ecg[0, 1, :]

        return swt_ecg
Example no. 40
def s_decomp(cA, wavelet, levels, omissions=([], False)): # stationary wavelet transform, AKA maximal overlap
    """
    1-dimensional stationary wavelet decomposition and reconstruction

    Parameters
    ----------
    ---Same as decomp, not including mode---

    Returns
    -------
        1D array of reconstructed data.

    """

    if omissions[0] and max(omissions[0]) > levels:
        raise ValueError("Omission level %d is too high.  Maximum allowed is %d." % (max(omissions[0]), levels))
        
    coeffs = pywt.swt(cA, wavelet, level=levels, start_level=0)
    coeffs = omit(coeffs, omissions, stationary=True)
    
    return pywt.iswt(coeffs, wavelet)
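The omit helper used above is not shown. A hypothetical sketch consistent with how it is called (zeroing the selected detail levels and, when the flag is set, the final approximation) could look like this; the real implementation may differ:

import numpy as np

def omit(coeffs, omissions, stationary=True):
    # hypothetical helper; omissions = ([detail levels to zero], zero_approximation_flag)
    levels, zero_approx = omissions
    coeffs = [list(pair) for pair in coeffs]
    for lvl in levels:
        # swt returns [(cA_n, cD_n), ..., (cA_1, cD_1)], so level 1 is the last entry
        coeffs[-lvl][1] = np.zeros_like(coeffs[-lvl][1])
    if zero_approx:
        coeffs[0][0] = np.zeros_like(coeffs[0][0])
    return [tuple(pair) for pair in coeffs]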
Example no. 41
    def swt(self):
        """
        Test pypwt against pywt for SWT.
        """
        W = self.W
        levels = self.levels
        wname = self.wname

        # Forward DWT with pypwt
        logging.info("computing Wavelets from pypwt")
        t0 = time()
        W.forward()
        logging.info("Wavelets.forward took %.3f ms" % elapsed_ms(t0))

        # Forward SWT with pywt
        logging.info("computing swt from pywt")
        Wpy = pywt.swt(self.data, self.wname, level=levels)
        logging.info("pywt took %.3f ms" % elapsed_ms(t0))

        # Compare results
        # FIXME: Error increases when levels increase, since output is scaled.
        tol = self.tol * 2**levels

        W_coeffs = W.coeffs
        if (levels != W.levels):
            err_msg = str("compare_coeffs(): pypwt instance has %d levels while pywt instance has %d levels" % (W.levels, levels))
            logging.error(err_msg)
            raise ValueError(err_msg)
        A = Wpy[0][0]
        W_a = W_coeffs[0] if W.batched1d else W_coeffs[0].ravel()
        maxerr = _calc_errors(A, W_a, "[app]") #
        self.assertTrue(maxerr < tol, msg="[%s] something wrong with the approximation coefficients (%d levels) (errmax = %e)" % (wname, levels, maxerr))
        for i in range(levels): # wavedec2 format
            # FIXME: Error increases when levels increase, since output is scaled.
            tol = self.tol * 2**(i+1)
            D1 = Wpy[levels-i-1][1] # TODO: take the new PyWavelet swt order into account
            logging.info("%s Level %d %s" % ("-"*5, i+1, "-"*5))
            W_D1 = W_coeffs[i+1] if W.batched1d else W_coeffs[i+1].ravel()
            maxerr = _calc_errors(D1, W_D1, "[det]")
            self.assertTrue(maxerr < tol, msg="[%s] something wrong with the detail coefficients at level %d (errmax = %e)" % (wname, i+1, maxerr))
Example no. 42
def TEST_PredictionQRS():
    recname = 'sel873'
    GroupResultFolder = os.path.join(curfolderpath, 'MultiLead4',
                                     'GroupRound1')
    QTdb = QTloader()
    rawsig = QTdb.load(recname)
    rawsig = rawsig['sig']
    with open(os.path.join(GroupResultFolder, '{}.json'.format(recname)),
              'r') as fin:
        RawResultDict = json.load(fin)
        LeadResult = RawResultDict['LeadResult']
        MarkDict = LeadResult[0]
        MarkList = Convert2ExpertFormat(MarkDict)

        # Display with 2 subplots.
        swt = SWT_NoPredictQRS(rawsig, MarkList)
        swt.swt()

        # cDlist
        wtlist = swt.cDlist[-4]

        plt.figure(1)
        # plot Non QRS ECG & SWT
        plt.subplot(211)
        plt.plot(rawsig)
        plt.plot(wtlist)
        plt.grid(True)
        # plot Original ECG
        rawsig = swt.QTdb.load(recname)
        rawsig = rawsig['sig']
        rawsig = swt.crop_data_for_swt(rawsig)
        coeflist = pywt.swt(rawsig, 'db6', 9)
        cAlist, cDlist = zip(*coeflist)
        wtlist = cDlist[-4]

        plt.subplot(212)
        plt.plot(rawsig)
        plt.plot(wtlist)
        plt.grid(True)
        plt.show()
Example no. 43
 def filtering(self, coeffTreshold):
     coeffs = pywt.swt(self.cleanData, self.wavelet, level = self.mainLevel+1)
     coeffsLen = len(coeffs)
     for i in range(len(coeffs)):
         cA, cD = coeffs[i]
         if i >= (coeffsLen - self.highNoiseLevel):
             cD = zeros(len(cA), dtype='float32')
             minSD = 0
             maxSD = 0
             snr = 0
             smoothCoef = 0
             logger.warn("filtering # noisLevel: {0}".format(i))
         else:
             minSD = ar.stdFinder(cD[self.deltaLen:], self.defaultFrame)
             maxSD = ar.getLocalPtp(cD[self.deltaLen:], self.defaultFrame*0.8)
             snr = maxSD / minSD
             smoothCoef = minSD*(coeffTreshold*(snr**(i**(0.7)/(i**(1.5)+i+1)))+i*2)
             logger.warn("filtering # minSD: {0}, maxSD: {1}, snr: {2}, level: {3}, smoothCoef: {4}".format(minSD, maxSD, snr, i, smoothCoef))
             cD = pywt.thresholding.soft(cD, smoothCoef)
         self.mysql_writer.dbWaveLevel_write(coeffTreshold, i, minSD, maxSD, smoothCoef)
         coeffs[i] = cA, cD
     return iswt(coeffs, self.wavelet)
Example no. 44
def decompose(wavelet, mems):
    for j in range(mems[0], mems[1]):
        if wavelet == 'haar':
            wlt = wavelet
        else:
            wlt = wavelet + str(j)
        # print(wavelet)
        for i in range(1, 6):
            coeffs = np.array(pywt.swt(runoff, wlt, level=i, axis=0))
            approxs = pd.DataFrame({'Discharges': coeffs[i - 1, 0, :, 0]})
            details = pd.DataFrame(coeffs[:i, 1, :, 0])
            details = details.transpose()
            # print(details.shape)
            print(wlt)
            approxs.to_csv(
                'C:/Users/TheNush07/Desktop/Work/Projects/StreamFlow Forecasting/Datasets/Decomps/Cholachguda/L{}/approxs-{}_2.csv'
                .format(i, wlt),
                index=False)
            details.to_csv(
                'C:/Users/TheNush07/Desktop/Work/Projects/StreamFlow Forecasting/Datasets/Decomps/Cholachguda/L{}/details-{}_2.csv'
                .format(i, wlt),
                index=False)
Example no. 45
def random():
    fig, (ax_w, ax_f) = plt.subplots(2)

    distributions = [.2, .4, .6, .8, .99]
    colors = ['b', 'r', 'g', 'm', 'c']

    for d, c in zip(distributions, colors):
        x_wavelet, y_wavelet, x_fourier, y_fourier = [], [], [], []
        for i in range(int(1e3)):
            signal = np.random.rand(int(1e5)) > d
            (cA2, cD2), (cA1, cD1) = pywt.swt(signal, 'haar', level=2)
            x_wavelet.append(np.mean(cA1))
            y_wavelet.append(np.mean(cA2))

            fourier = np.fft.fft(signal)
            freqs = np.fft.fftfreq(len(fourier))
            peaks = argrelextrema(np.abs(fourier), np.greater)

            x_fourier.append(freqs[peaks[0][0]])
            y_fourier.append(freqs[peaks[0][1]])

        ax_w.scatter(x_wavelet,
                     y_wavelet,
                     marker='o',
                     c=c,
                     label='w: rand(1e5) > {}'.format(d),
                     linewidth=0)
        ax_f.scatter(x_fourier,
                     y_fourier,
                     marker='+',
                     c=c,
                     label='f: rand(1e5) > {}'.format(d))

    ax_w.set_title('Random signals')
    ax_w.set_xlabel('Wavelet transform')
    ax_f.set_xlabel('Fourier transform')
    #plt.legend()

    plt.show()
Example no. 46
File: swpt.py Project: kesmarag/sp
 def decompose(self, signal):
     pth = ['']
     self._coeff_dict[''] = np.squeeze(signal)
     for l in range(self._max_level):
         pth_new = []
         for p in pth:
             coeff = pywt.swt(self._coeff_dict[p],
                              wavelet=self._wavelet,
                              level=self._max_level - len(p),
                              start_level=len(p))
             p_run = p
             for i, C in enumerate(coeff[::-1]):
                 self._coeff_dict[p_run + 'A'] = C[0]
                 self._energy_dict[p_run + 'A'] = np.linalg.norm(C[0])**2
                 self._coeff_dict[p_run + 'D'] = C[1]
                 self._energy_dict[p_run + 'D'] = np.linalg.norm(C[1])**2
                 self._entropy_dict[p_run + 'A'] = 0.0
                 self._entropy_dict[p_run + 'D'] = 0.0
                 for c in C[0]:
                     self._entropy_dict[p_run + 'A'] += -100.0 * np.log(
                         c**2 / np.linalg.norm(signal, ord=2)**
                         2) * c**2 / np.linalg.norm(signal, ord=2)**2
                 self._entropy_dict[p_run + 'A'] = self._entropy_dict[
                     p_run + 'A'] / 2**(len(p_run) + 2.0)
                 for c in C[1]:
                     self._entropy_dict[p_run + 'D'] += -100.0 * np.log(
                         c**2 / np.linalg.norm(signal, ord=2)**
                         2) * c**2 / np.linalg.norm(signal, ord=2)**2
                 self._entropy_dict[p_run + 'D'] = self._entropy_dict[
                     p_run + 'D'] / 2**(len(p_run) + 2.0)
                 if i < len(coeff) - 1 and len(p_run) < self._max_level - 1:
                     pth_new.append(p_run + 'D')
                     p_run = p_run + 'A'
         pth = list(pth_new)
     energies = self._get_energies()
     for k in self._coeff_dict:
         if len(k) > 0:
             self._energy_dict[k] = self._energy_dict[k] / energies[len(k) -
                                                                    1]
Example no. 47
def swt_process_signal(sig,
                       exclude_level=0,
                       baseline_exclude_level=0,
                       total_levels=11,
                       fs=1,
                       **kwargs):

    len_orig = len(sig)
    sig, n_pad_l, n_pad_r = swt_align(sig, total_levels)
    coeffs = pywt.swt(sig, 'db4', level=total_levels)

    baseline = swt_filter(coeffs, baseline_exclude_level)
    sig_f = swt_filter(coeffs, exclude_level)
    delta = sig_f - baseline

    if n_pad_r > 0:
        baseline, sig_f = baseline[n_pad_l:-n_pad_r], sig_f[n_pad_l:-n_pad_r]
    elif n_pad_l > 0:
        baseline, sig_f = baseline[n_pad_l:], sig_f[n_pad_l:]

    ts = np.arange(len(baseline)) / fs
    return baseline, sig_f, ts
Example no. 48
    def swt_detector(self, unfiltered_ecg):
        """
        Stationary Wavelet Transform 
        based on Vignesh Kalidas and Lakshman Tamil. 
        Real-time QRS detector using Stationary Wavelet Transform 
        for Automated ECG Analysis. 
        In: 2017 IEEE 17th International Conference on 
        Bioinformatics and Bioengineering (BIBE). 
        Uses the Pan and Tompkins thresholding.
        """
        
        swt_level=3
        padding = -1
        for i in range(1000):
            if (len(unfiltered_ecg)+i)%2**swt_level == 0:
                padding = i
                break

        if padding > 0:
            unfiltered_ecg = np.pad(unfiltered_ecg, (0, padding), 'edge')
        elif padding == -1:
            print("Padding greater than 1000 required\n")    

        swt_ecg = pywt.swt(unfiltered_ecg, 'db3', level=swt_level)
        swt_ecg = np.array(swt_ecg)
        swt_ecg = swt_ecg[0, 1, :]

        squared = swt_ecg*swt_ecg

        f1 = 0.01/self.fs
        f2 = 10/self.fs

        b, a = signal.butter(3, [f1*2, f2*2], btype='bandpass')
        filtered_squared = signal.lfilter(b, a, squared)       

        filt_peaks = panPeakDetect(filtered_squared, self.fs)
        r_peaks = searchBack(filt_peaks, unfiltered_ecg, int(0.1*self.fs))

        return r_peaks
Example no. 49
    def swt_detector(self, unfiltered_ecg):
        """
        Stationary Wavelet Transform 
        based on Vignesh Kalidas and Lakshman Tamil. 
        Real-time QRS detector using Stationary Wavelet Transform 
        for Automated ECG Analysis. 
        In: 2017 IEEE 17th International Conference on 
        Bioinformatics and Bioengineering (BIBE). 
        Uses the Pan and Tompkins thresholding.
        """

        swt_level = 3
        padding = -1
        for i in range(1000):
            if (len(unfiltered_ecg) + i) % 2**swt_level == 0:
                padding = i
                break

        if padding > 0:
            unfiltered_ecg = np.pad(unfiltered_ecg, (0, padding), 'edge')
        elif padding == -1:
            print("Padding greater than 1000 required\n")

        swt_ecg = pywt.swt(unfiltered_ecg, 'db3', level=swt_level)
        swt_ecg = np.array(swt_ecg)
        swt_ecg = swt_ecg[0, 1, :]

        squared = swt_ecg * swt_ecg

        N = int(0.12 * self.fs)
        mwa = MWA(squared, N)
        mwa[:int(0.2 * self.fs)] = 0

        mwa_peaks = panPeakDetect(mwa, self.fs)

        r_peaks = searchBack(mwa_peaks, unfiltered_ecg, N)

        return r_peaks
Example no. 50
def wavelet_filtering(data, wfun, max_level=8):
    #
    padsize = int((2**max_level) * np.ceil(data.shape[0] / (2**max_level)) -
                  data.shape[0])
    #
    data_padded = np.pad(data, (0, padsize),
                         'constant',
                         constant_values=(0, 0))
    #
    wave = pywt.swt(data_padded, wfun, level=max_level, start_level=0, axis=0)
    #
    wave_m = [[
        np.zeros((data_padded.shape[0], ), dtype=float) for j in range(2)
    ] for i in range(max_level)]  #list
    wave_m[-4][1] = wave[-4][1]
    wave_m[-5][1] = wave[-5][1]
    wave_m = [tuple(wave_m[i]) for i in range(max_level)]
    #
    data_const = pywt.iswt(wave_m, wfun)
    if padsize != 0:
        data_const = data_const[:-padsize]
    #
    return data_const
Example no. 51
def swt_decomposition(df_data, wavelet, level):
    """Stationary wavelet decomposition

    Arguments:
        df_data {pandas DataFrame} -- DataFrame with the input data
        wavelet {Wavelet object or name} -- wavelet to use
        level {int} -- number of decomposition levels

    Returns:
        numpy array -- array containing the approximation and detail coefficients
        in the same order as pywt's wavedec function: [(cAn, cDn), ..., (cA2, cD2), (cA1, cD1)]
    """

    decomposition = []

    for col in df_data.columns:
        # extract the wavelet coefficients for each tag
        swt_coeffs = pywt.swt(df_data[col], wavelet, level=level, axis=0)

        decomposition_tag = np.concatenate(swt_coeffs)
        decomposition.append(decomposition_tag)

    return np.concatenate(decomposition)
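A usage sketch under stated assumptions (hypothetical DataFrame; its length must be a multiple of 2**level):

import numpy as np
import pandas as pd

df = pd.DataFrame({'a': np.arange(16.0), 'b': np.arange(16.0)[::-1]})   # hypothetical data
features = swt_decomposition(df, 'haar', level=2)
# one (cA, cD) block per level per column, stacked row-wise;
# expected shape: (2 * level * n_columns, len(df)) = (8, 16)
print(features.shape)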
Example no. 52
def test_iswt_mixed_dtypes():
    # Mixed precision inputs give double precision output
    x_real = np.arange(16).astype(np.float64)
    x_complex = x_real + 1j * x_real
    wav = 'sym2'
    for dtype1, dtype2 in [(np.float64, np.float32), (np.float32, np.float64),
                           (np.float16, np.float64),
                           (np.complex128, np.complex64),
                           (np.complex64, np.complex128)]:

        if dtype1 in [np.complex64, np.complex128]:
            x = x_complex
            output_dtype = np.complex128
        else:
            x = x_real
            output_dtype = np.float64

        coeffs = pywt.swt(x, wav, 2)
        # different precision for the approximation coefficients
        coeffs[0] = [coeffs[0][0].astype(dtype1), coeffs[0][1].astype(dtype2)]
        y = pywt.iswt(coeffs, wav)
        assert_equal(output_dtype, y.dtype)
        assert_allclose(y, x, rtol=1e-3, atol=1e-3)
Example no. 53
#day = 24
#today = random.randint(1000, dataset.shape[0]-day*2)
#today = 4600

#See if we can predict 24 times based on instances, learned from the training set.

data_raw = sg.utils.Normalizer(dataset, axis=0)

data = data_raw.normalized[:2**14,1]

# One year is 365*24 = 8760 datapoints. If we round down to 8192, we will get
# the maximum amount of scales for the decomposition (13), i.e. math.pow(2,13)
# The number of levels/scales determine how far we look back.
level = 4

coeffs = pywt.swt(data, 'haar', level=level)

# Collect coefficients for training. Aj = 2 is taken from the paper.

Aj = 2

# The first 2^level datapoints cannot be used to predict because of lack of history.
# level+1 because of the smooth array.
x = np.zeros((len(data) - 2**level, (level+1)*Aj))

for i in range(len(x)):
    row = []
    # Collect coefficients for each level. cAn, i.e. the smoothest array.
    for k in range(1, Aj+1):
        row.append(coeffs[-1][0][2**level + i - 2**level*(k-1)])
    # cD, the details.
Example no. 54
def process_waveforms_in_file(input_file_name, output_file_name):

    # Initialize, setting the mode to batch to avoid any X connections
    ROOT.gROOT.SetBatch()

    # Grab the input file and tree, setting the
    # branch address for the EventBranch
    input_file = ROOT.TFile(input_file_name)
    the_tree = input_file.Get("soudan_wf_analysis")
    event = ROOT.MGTEvent()
    the_tree.SetBranchAddress("EventBranch", event)


    # Baseline Transformer
    baseline = ROOT.MGWFBaselineRemover()
    init_baseline_time = 280e3

    # Extremum Transformer
    extremum = ROOT.MGWFExtremumFinder()

    # Pulse finder Transformer
    pulse_finder = ROOT.MGWFPulseFinder()

    # Bandpass transformer, this is used to smooth
    # the shaped waveforms before energy estimation 
    first_bandpass = ROOT.MGWFBandpassFilter()
    shaped_bandpass = 0.0001 # 100 kHz low-bandpass
    first_bandpass.SetUpperBandpass(shaped_bandpass)

    # Setting parameters of the risetime
    rise = ROOT.MGWFRisetimeCalculation()
    rise.SetInitialThresholdPercentage(0.1) #10 %
    rise.SetFinalThresholdPercentage(0.9) #90%
    rise.SetInitialScanToPercentage(0.8) #80%

    # Static window, for amplitude estimation
    static_window = ROOT.MGWFStaticWindow() 

    # Smoothing derivative
    der = ROOT.MGWFSavitzkyGolaySmoother(6, 1, 2)

    # Temporary waveforms
    newwf = ROOT.MGTWaveform()
    tempder = ROOT.MGTWaveform()
    bandpass_wf = ROOT.MGTWaveform()

    # Parameters for the wavelet transformation
    wl_trans = pywt.Wavelet('haar')
    level = 6
    length_of_pulse = 30e3

    # Setup objects for writing out, TFile, TTree, etc.
    output_file = ROOT.TFile(output_file_name, "recreate")
    output_tree = ROOT.TTree("energy_output_tree", "Soudan Energy Tree")

    # Setup MGMAnalysisClasses to encapsulate the output data
    muon_veto = ROOT.MGMMuonVeto()
    channel_info = ROOT.MGMBeGeChannelInfo()
    risetime = ROOT.MGMRisetimeInfo()
    pulser_on = array.array('L', [0]) 
    # This is a hack to get a 64-bit unsigned integer
    long_array = c_ulonglong*1
    time = long_array()

    output_tree.Branch("muon_veto", muon_veto)
    output_tree.Branch("channel_info", channel_info)
    output_tree.Branch("risetime_info", risetime)
    output_tree.Branch("pulser_on", pulser_on, "pulser_on/i")
    output_tree.Branch("time", time, "time/l")


    percentageDone = 0
    numEntries = the_tree.GetEntries()

    # The events has waveforms in the following configuration:
    # 0: channel 0, shaped 6 mus, low-energy
    # 1: channel 1, shaped 10 mus, low-energy
    # 2: channel 2, shaped 10 mus, high-energy
    # 3: muon veto 
    # 4: pre-amp trace, low-energy 
    # 5: pre-amp trace, high-energy 
    for entry in range(numEntries):
        # Outputting progress, every 10 percent
        if int(entry*100/numEntries) > 10*percentageDone: 
          percentageDone += 1
          print "Done (%): ", percentageDone*10

        # Clear the analysis objects
        muon_veto.regions.clear()
        channel_info.channels.clear()
        risetime.channels.clear()

        # Grab the event from the input tree
        the_tree.GetEntry(entry)
        
        # Set pulser flags (combining two flags from initial tree)
        pulser_on[0] = ((the_tree.pulser_chunk_two != 0) or (the_tree.pulser_chunk_one != 0))

        # Set time
        time[0] = the_tree.timestamp

        # Muon VETO
        # Use the pulser finder to determine the regions of the 
        # waveform where the muon veto has fired.
        pulse_finder.SetThreshold(-0.2) # -0.2 volts, it fires negative
        pulse_finder.Transform(event.GetWaveform(3))
        for an_event in pulse_finder.GetThePulseRegions():
            muon_veto.regions.push_back(an_event)

        # All channels
        for chan_num in (0,1,2,4,5):
            baseline.SetBaselineTime(init_baseline_time) # 250 mus
            wf = event.GetWaveform(chan_num)
            extremum.SetFindMaximum(True)
            extremum.Transform(wf)

            # Find parameter of waveform, max, min, etc.
            max_value = extremum.GetTheExtremumValue()
            avg_value = max_value
            extremum.SetFindMaximum(False)
            extremum.Transform(wf)
            min_value = extremum.GetTheExtremumValue()
            baseline_factor = 1
            
            # only process the shaped waveform with a bandpass filter
            if chan_num in (0,1,2):
                first_bandpass.Transform(wf)
                extremum.SetFindMaximum(True)
                extremum.Transform(wf)
                avg_value = extremum.GetTheExtremumValue()/wf.GetLength()
                baseline_factor = wf.GetLength()
            baseline_value = baseline.GetBaseline(wf)/baseline_factor 

            # Save values in channel_info object
            channel_info.channels.push_back(
              ROOT.MGMBeGeOneChannelInfo(baseline_value, max_value, min_value, avg_value))
        
        # Preamp trace channels
        for chan_num in (4,5):
            wf = event.GetWaveform(chan_num)

            # First do a bandpass filter to grab important values
            # Grab the max and the min
            first_bandpass.Transform(wf, bandpass_wf)
            extremum.SetFindMaximum(True)
            extremum.Transform(bandpass_wf)
            rise_max = extremum.GetTheExtremumValue() 
            rise_max_pos = extremum.GetTheExtremumPoint() 

            extremum.SetFindMaximum(False)
            extremum.Transform(bandpass_wf)
            rise_min = extremum.GetTheExtremumValue() 
            rise_min_pos = extremum.GetTheExtremumPoint() 

            # Perform the wavelet smoothing
            # Get the raw data from the waveform to pass to pywt
            vec = wf.GetVectorData()
            # Make the waveform length dyadic (2^N) by reducing it to 4096 samples
            # (FIXME: we assume the waveform is 8000 entries long)
            vec.erase(vec.begin(), vec.begin()+3904)

            # Stationary Wavelet Transform
            output = pywt.swt(vec, wl_trans, level=level)
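            # pywt.swt returns [(cA_level, cD_level), ..., (cA_1, cD_1)], coarsest
            # level first; the 4096-sample slice above is a multiple of 2**level
            # (here 2**6 = 64), which swt requires.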
            # Thresholding
            apply_threshold(output, 0.8, get_threshold_list())
            # Inverse transform
            cA = iswt(output, wl_trans)

            # Reloading into waveform, but getting a small region around
            # the known waveform rise to reduce later calculation
            newwf.SetSamplingFrequency(wf.GetSamplingFrequency())
            start = int(100e3*wf.GetSamplingFrequency())
            end = start + int(length_of_pulse*wf.GetSamplingFrequency())
            # loading waveform from start to end
            newwf.SetData(cA[start:end], end-start)

            # Now find the risetime
            # Take the derivative to zero in on the pulse
            der.Transform(newwf, tempder)
            # Find the minimum (FIXME: assuming a negative-going pulse)
            extremum.SetFindMaximum(False)
            extremum.Transform(tempder)

            # Find the FWHM
            pulse_finder.SetThreshold(0.5*extremum.GetTheExtremumValue())
            pulse_finder.Transform(tempder)

            point = extremum.GetTheExtremumPoint()

            # Find the correct region in case more than one was found,
            # i.e. the one that contains the extremum point.
            regions = pulse_finder.GetThePulseRegions()
            test_point = 0
            for region in range(regions.size()):
                if regions[region].IsInRegion(point):
                    test_point = region
                    break
    
            
            if regions.size() == 0:
                # No regions were found (this should never happen, but might
                # for a waveform close to the noise level).
                # Estimate using the derivative peak instead,
                # making a 4 mus window around this point.
                start = point*newwf.GetSamplingPeriod()-2e3
                end = point*newwf.GetSamplingPeriod()+2e3 
            else:
                # We found the correct point, set the start and stop time
                # at the FWHM
                start = regions[test_point].beginning*newwf.GetSamplingPeriod()
                end = regions[test_point].end*newwf.GetSamplingPeriod()
            # Gives the HWHM (half-width at half-max)
            diff = (end - start)/2.0 

            # This extends the window to one more full width on each side  
            start -= 2*diff
            end += 2*diff

            # First estimate and subtract the baseline
            # This check makes sure we are still on the waveform
            if start < 0: start = 0
            if start < 1e3: 
                static_window.SetDelayTime(start)
            else:
                static_window.SetDelayTime(start-1e3)

            # Estimate, subtract baseline using 1 mus integration
            static_window.SetFirstRampTime(0)
            static_window.SetSecondRampTime(1e3)
            static_window.Transform(newwf)
            newwf -= static_window.GetPeakHeight()
    
            # now grab the peak height
            if end > length_of_pulse - 1e3: end = length_of_pulse - 1e3
            static_window.SetDelayTime(end)
            static_window.SetFirstRampTime(0)
            static_window.SetSecondRampTime(1e3)
            static_window.Transform(newwf)
   
            # We have the peak height; feed it into the risetime calculator
            max_value_to_find = static_window.GetPeakHeight()
            rise.SetPulsePeakHeight(max_value_to_find)
     
            # Scan from the start position defined by the beginning
            # of the baseline estimation 
            rise.SetScanFrom(int(start*newwf.GetSamplingFrequency()))
            rise.Transform(newwf)
            rt = rise.GetRiseTime()
            start_rt = rise.GetInitialThresholdCrossing()
            stop_rt = rise.GetFinalThresholdCrossing()

            # Save in the MGMAnalysis Object
            risetime.channels.push_back(
              ROOT.MGMRisetimeOneChannelInfo(start_rt, stop_rt, rt, 
                                             rise_max, rise_min,
                                             int(rise_max_pos), int(rise_min_pos)))

        
        output_tree.Fill()
    output_file.cd()
    output_tree.Write()
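
The block above smooths the preamp trace by taking a stationary wavelet transform, thresholding the detail coefficients, and inverting the transform; apply_threshold, get_threshold_list and iswt are helpers defined elsewhere in that codebase. For reference, here is a minimal self-contained sketch of the same idea using only PyWavelets built-ins (pywt.threshold and pywt.iswt); the test signal, wavelet, level and threshold value are illustrative assumptions, not values from the original code.

import numpy as np
import pywt

rng = np.random.default_rng(0)
clean = np.repeat([0.0, 1.0, 1.0, 0.0], 256)             # step-like pulse, 1024 samples
noisy = clean + 0.1 * rng.standard_normal(clean.size)    # length is a multiple of 2**level

level = 5
coeffs = pywt.swt(noisy, 'haar', level=level)             # [(cA5, cD5), ..., (cA1, cD1)]
# Soft-threshold only the detail arrays; leave the approximations untouched.
denoised_coeffs = [(cA, pywt.threshold(cD, value=0.2, mode='soft'))
                   for cA, cD in coeffs]
denoised = pywt.iswt(denoised_coeffs, 'haar')             # inverse stationary transform
print('residual RMS:', np.sqrt(np.mean((denoised - clean) ** 2)))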
Esempio n. 55
0
				for i in range(N-len(wf1)):
					wf1.append(0.)
					wf2.append(0.)
					wf3.append(0.)
			len1 = wlt.swt_max_level(N)
			n1 = len(wf1)
			len3 = wlt.swt_max_level(n1)
			len2 = wlt.dwt_max_level(N, w.dec_len)
			print(N, 'swt:',len1,'dwt:',len2,len3,n1)
			n += 1		
		for i in range(N):
			add1.append(0.)
			add2.append(0.)
			add3.append(0.)

		[(cA7, cD7),(cA6, cD6),(cA5, cD5),(cA4, cD4),(cA3, cD3),(cA2, cD2), (cA1, cD1)] = wlt.swt(wf1, wlt_type, 7, start_level=start_level)
		if(len(add_levels) > 0):
#			figadd = plt.figure(figsize=(10,6))
#			ax1=plt.subplot(511)
#			plt.title(label)
			for level in range(len(add_levels)):
				print("Level",add_levels[level])
				if(int(add_levels[level]) == 0):
					print("Adding cD7")		
					for i in range(N):
						add1[i] += cD7[i]
				if(int(add_levels[level]) == 1):
					print("Adding cD6")		
					for i in range(N):
						add1[i] += cD6[i]
				if(int(add_levels[level]) == 2):
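
The snippet above zero-pads the waveforms to a common length N and compares pywt.swt_max_level with pywt.dwt_max_level before running a level-7 swt. A standalone sketch of that comparison, with an illustrative length and wavelet (db4) standing in for N and wlt_type:

import numpy as np
import pywt

w = pywt.Wavelet('db4')
for n in (1000, 1024):
    print(n,
          'swt max level:', pywt.swt_max_level(n),
          'dwt max level:', pywt.dwt_max_level(n, w.dec_len))

x = np.arange(1000, dtype=float)
padded = np.concatenate([x, np.zeros(1024 - len(x))])    # zero-pad, as the snippet does
coeffs = pywt.swt(padded, w, level=5)                     # 1024 is a multiple of 2**5
print('levels computed:', len(coeffs))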
Esempio n. 56
0
def __modelling_cycle():

    initial_data = test_data

    fig_init = plt.figure()
    fig_init.canvas.manager.set_window_title('Initial data')
    plt.plot(initial_data, color='g')
#--------------- wavelet decomposition -------------------#
    decomposition_level = 2
    wavelet_families = pywt.families()
    wavelet_family = wavelet_families[0]
    selected_wavelet = pywt.wavelist(wavelet_family)[0]

    wavelet = pywt.Wavelet(selected_wavelet)  #NB: taking first variant of wavelet (e.g. haar1)
    # discrete (non stationary) multilevel decomposition
    wCoefficients_Discrete = pywt.wavedec(initial_data, wavelet, level=decomposition_level) #NB: output length also depends on wavelet type
    # stationary ('à trous' algorithm: does not decimate coefficients at each transform level) multilevel decomposition
    wCoefficients_Stationary = pywt.swt(initial_data, wavelet, level=decomposition_level)

    fig_discrete = plt.figure(); n_coeff = 1
    fig_discrete.canvas.manager.set_window_title('Discrete decomposition [ ' + str(decomposition_level) + ' level(s) ]') 
    for coeff in wCoefficients_Discrete:
#        print coeff
        fig_discrete.add_subplot(len(wCoefficients_Discrete), 1, n_coeff); n_coeff += 1
        plt.plot(coeff)

    fig_stationary = plt.figure(); n_coeff = 1; rows = 0
    fig_stationary.canvas.manager.set_window_title('Stationary decomposition [ ' + str(decomposition_level) + ' level(s) ]')
    for item in wCoefficients_Stationary: rows += len(item)
    i = 0; j = 0    # tree coeffs
    for coeff in wCoefficients_Stationary:
        for subcoeff in coeff:
            print(i, j)
#            print subcoeff
            fig_stationary.add_subplot(rows, 1, n_coeff); n_coeff += 1
            plt.plot(subcoeff)
            j += 1
        i += 1

    plt.show()

    fig_stat_sum = plt.figure(); n_coeff = 1
    fig_stat_sum.canvas.manager.set_window_title('SWT sum by levels [ ' + str(decomposition_level) + ' level(s) ]')
    for coeff in wCoefficients_Stationary:
        level_sum = coeff[0] + coeff[1]  # cA + cD for this level
        fig_stat_sum.add_subplot(len(wCoefficients_Stationary), 1, n_coeff); n_coeff += 1
        plt.plot(level_sum)
        
#    plt.show()

#------------------ modelling by level -------------------#

    r = R()
    r.i_data = initial_data     # or r['i_data'] = initial_data

    ### Holt-Winters ###
    # non-seasonal Holt-Winters
    print(r('hw <- HoltWinters( i_data, gamma = FALSE )'))

    # seasonal Holt-Winters
    r.freq = 4  # sampling frequency of the series (months, days, years, etc.)
#    print r( 'hw <- HoltWinters( ts ( %s, frequency = %s ) )' % ( Str4R(r.i_data), Str4R(r.freq) ) )
#    print r( 'hw <- HoltWinters( ts ( %s, frequency = %s, start = c(1,1) ) )' % ( Str4R(r.i_data), Str4R(r.freq) ) )

    # resulting sum of squared errors (SSE)
    print(r.hw['SSE'])

    # bruteforce frequency search
#    print 'test ahead:'
#    sse_dict = {}
#    for i in xrange(2, 50):
#        r.freq = i
##        r( 'hw <- HoltWinters( ts ( %s, frequency = %s, start = c(1,1) ) )' % ( Str4R(r.i_data), Str4R(r.freq) ) )
#        r( 'hw <- HoltWinters( ts ( %s, frequency = %s ) )' % ( Str4R(r.i_data), Str4R(r.freq) ) )
#        print r.hw['SSE']
#        sse_dict[r.hw['SSE']] = i; i += 1
#    print 'Resulting:'
#    m = min(sse_dict.keys())
#    print sse_dict[m], m

    fig = plt.figure()
    fig.canvas.manager.set_window_title('Holt-winters model')
    ax = fig.add_subplot(111)
#    ax.plot(r.hw['fitted'][:,0])   # the columns are: xhat, level, trend
#    plt.show()

    # forecast length
    r.steps_ahead = 50
#    print r('pred <- predict(%s, %s, prediction.interval = TRUE)' % ( Str4R(r.hw), Str4R(r.steps_ahead)) )
#    print r( 'pred <- predict(hw, %s, prediction.interval = TRUE)', Str4R(r.steps_ahead) )
    print(r('pred <- predict(hw, 50, prediction.interval = TRUE)'))
#    plt.plot(r.pred)
    ax.plot(initial_data)
    ax.plot(append(r.hw['fitted'][:,0], r.pred[:,0]))   # concatenating reconstructed model and resulting forecast

#    plt.show()

#------------------ reconstruction -------------------#
    # multilevel idwt
    reconstructed_Discrete = pywt.waverec(wCoefficients_Discrete, selected_wavelet)
    fig_dis_r = plt.figure()
    fig_dis_r.canvas.manager.set_window_title('DWT reconstruction')
    plt.plot(reconstructed_Discrete)
#    plt.show()

    # multilevel stationary
    reconstructed_Stationary = iswt(wCoefficients_Stationary, selected_wavelet)

    fig_sta_r = plt.figure()
    fig_sta_r.canvas.manager.set_window_title('SWT reconstruction')
    plt.plot(reconstructed_Stationary)
    plt.show()
    print('end')
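
Both decompositions used in the cycle above are invertible: pywt.waverec undoes pywt.wavedec, and the stationary coefficients can be inverted as well (the iswt called above is a helper defined elsewhere in that code; recent PyWavelets ships pywt.iswt). A minimal round-trip check, with an illustrative sine series standing in for test_data:

import numpy as np
import pywt

data = np.sin(np.linspace(0, 8 * np.pi, 512))   # 512 samples, a multiple of 2**level
level = 2
wavelet = pywt.Wavelet('haar')

dwt_coeffs = pywt.wavedec(data, wavelet, level=level)
swt_coeffs = pywt.swt(data, wavelet, level=level)

print(np.allclose(pywt.waverec(dwt_coeffs, wavelet), data))   # True
print(np.allclose(pywt.iswt(swt_coeffs, wavelet), data))      # True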
Esempio n. 57
0
def modelling_cycle():

#--------------- initialization -------------------#
#    initial_data = test_data
    initial_data = test_data_one

#    fig_init = plt.figure()
#    fig_init.canvas.manager.set_window_title('Initial data')
#    plt.plot(initial_data, color='g')

    wavelet_families = pywt.families()
    print('Wavelet families:', ', '.join(wavelet_families))
    wavelet_family = wavelet_families[4]
    selected_wavelet = pywt.wavelist(wavelet_family)[0]
    wavelet = pywt.Wavelet(selected_wavelet)
    print('Selected wavelet:', selected_wavelet)

    max_level = pywt.swt_max_level(len(initial_data))
#    decomposition_level = max_level / 2
    decomposition_level = 3
    print('Max level:', max_level, '\t Decomposition level:', decomposition_level)

#--------------- decomposition -------------------#
    w_initial_coefficients = pywt.swt(initial_data, wavelet, level=decomposition_level)
    w_selected_coefficiets = select_levels_from_swt(w_initial_coefficients)
    w_node_coefficients = select_node_levels_from_swt(w_initial_coefficients)      #something terribly wrong here, yet the rest works!

#------------------ threshold --------------------#

    threshold = measure_threshold(w_initial_coefficients)

    w_threshold_coeff = w_initial_coefficients[:]
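    # Note: [:] copies only the outer list; the (cA, cD) arrays inside are still
    # shared with w_initial_coefficients, so if apply_threshold modifies the
    # coefficients in place, the originals change as well.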
    apply_threshold(w_threshold_coeff)
    plot_initial_updated(w_initial_coefficients, w_threshold_coeff)

#    plt.figure()
#    for coeff in w_selected_coefficiets:
#        plt.plot(coeff)
#    plt.figure()
#    for coeff in w_node_coefficients:
#        plt.plot(coeff)
#    plt.show()

#--------------- modification -------------------#
    r = R()

    w_new_coefficients = [0] * len(w_selected_coefficiets)
    for index in range(0, len(w_selected_coefficiets)):
        r.i_data = w_selected_coefficiets[index]

        r('hw <- HoltWinters( ts(i_data, frequency = 12), gamma = TRUE )')
        r('pred <- predict(hw, 50, prediction.interval = TRUE)')

        w_new_coefficients[index] = append(w_selected_coefficiets[index], r.pred[:,0])

    w_new_node_coefficients = [0] * len(w_node_coefficients)
    for index in range(0, len(w_node_coefficients)):
        r.i_data = w_node_coefficients[index]

        r('hw <- HoltWinters( ts(i_data, frequency = 12), gamma = TRUE )')
        r('pred <- predict(hw, 50, prediction.interval = TRUE)')

        w_new_node_coefficients[index] = append(w_node_coefficients[index], r.pred[:,0])
#----

#    plt.figure()
#    for coeff in w_new_coefficients:
#        plt.plot(coeff)
#    plt.figure()
#    for coeff in w_new_node_coefficients:
#        plt.plot(coeff)
#    plt.show()

#--------------- reconstruction  -------------------#
#    wInitialwithUpdated_Nodes = update_node_levels_swt(w_initial_coefficients, w_new_node_coefficients)

#    plot_initial_updated(w_initial_coefficients, w_new_node_coefficients, True)
#    plot_initial_updated(w_initial_coefficients, wInitialwithUpdated_Nodes) (!)

#    plt.figure()
#    for dyad in wInitialwithUpdated_Nodes:
#        plt.plot(dyad[0])
#        plt.plot(dyad[1])
#
#    plt.figure()
#    for dyad in w_initial_coefficients:
#        plt.plot(dyad[0])
#        plt.plot(dyad[1])
#
#    plt.show()

#    w_updated_coefficients = update_selected_levels_swt(w_initial_coefficients, w_selected_coefficiets)
#    w_updated_coefficients = update_selected_levels_swt(w_initial_coefficients, w_new_coefficients)


#----
#    w_updated_coefficients = update_swt(w_initial_coefficients, w_selected_coefficiets, w_node_coefficients)

    w_updated_coefficients_nodes = update_swt(w_initial_coefficients, w_new_coefficients, w_new_node_coefficients)
    w_updated_coefficients = update_selected_levels_swt(w_initial_coefficients, w_new_coefficients)

    plot_initial_updated(w_initial_coefficients, w_updated_coefficients_nodes)
    plot_initial_updated(w_initial_coefficients, w_updated_coefficients)

    reconstructed_Stationary_nodes = iswt(w_updated_coefficients_nodes, selected_wavelet)
    reconstructed_Stationary = iswt(w_updated_coefficients, selected_wavelet)

    fig_sta_r = plt.figure()
    fig_sta_r.canvas.manager.set_window_title('SWT reconstruction')
    plt.plot(reconstructed_Stationary)

    fig_sta_r_n = plt.figure()
    fig_sta_r_n.canvas.manager.set_window_title('SWT reconstruction (nodes)')
    plt.plot(reconstructed_Stationary_nodes)

    plt.show()
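
The helpers used above (select_levels_from_swt, select_node_levels_from_swt, update_swt, update_selected_levels_swt, measure_threshold, iswt, plot_initial_updated) are defined elsewhere in the source. For orientation, a small sketch of the coefficient layout they operate on: pywt.swt returns one (cA, cD) pair per level, ordered from the coarsest level down to level 1, and every coefficient array has the same length as the input.

import numpy as np
import pywt

data = np.random.default_rng(1).standard_normal(96)   # 96 is a multiple of 2**3
coeffs = pywt.swt(data, 'haar', level=3)               # [(cA3, cD3), (cA2, cD2), (cA1, cD1)]

for depth, (cA, cD) in enumerate(coeffs):
    level_num = len(coeffs) - depth                    # 3, 2, 1
    print('level', level_num, 'lengths:', len(cA), len(cD))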